diff --git "a/2744.jsonl" "b/2744.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2744.jsonl"
@@ -0,0 +1,1169 @@
+{"seq_id":"40356150197","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/2/26 14:43\n# @Author : Joan\n# @Email : sj11249187@126.com\n# @File : probudget.py\n# @Software: PyCharm\n\nimport numpy as np\nimport json\nimport pandas as pd\nimport time\n\n\ndef readJson(path, filename):\n    with open(path + '\\\\' + filename, encoding='utf-8') as f:\n        data = json.load(f)\n        data = pd.DataFrame(data['RECORDS'])\n    return data\n\n\ndef qd_de_mix(df):\n    '''\n    Bind each item (by index) to the quota entries (by index) attached to it: item as key, quotas as value\n    :param df: project case data in dataframe format, keeping only items and quotas\n    :return:\n    '''\n    # Compute the diff of zmKind; in zmKind, 10 = quota, 50 = item\n    zmKind_diff = np.diff(df.zmKind)\n    # Take the index of the last element of each run of equal values\n    pos, = np.where(zmKind_diff)\n    # If the last diff equals 0, append the index of the last zmKind element to pos\n    if (len(zmKind_diff) > 1) and (zmKind_diff[-1] == 0):\n        pos = np.append(pos, len(df.zmKind)-1)\n    # Reshape pos into an n x 2 array: [i][0] is the item, [i][1] is the quota\n    if len(pos) % 2 != 0:\n        pos = pos[:-1]\n    pos_array = pos.reshape(len(pos)//2, 2)\n    # Convert the array into a dict; key: item index (last item of a run), value: quota index (last quota of a run)\n    qd_de_last = {}\n    for i in range(len(pos_array)):\n        qd_de_last[pos_array[i][0]] = pos_array[i][1]\n    # Fill out the quota indices; key: item index, value: indices of all quotas attached to that item\n    qd_de_defill = {}\n    for qd, de in qd_de_last.items():\n        if de - qd > 1:\n            qd_de_defill[qd] = list(range(qd+1, de+1))\n        else:\n            qd_de_defill[qd] = [de]\n\n    return qd_de_defill\n\n\ndef qd_no_fill(df, qd_no_defill):\n    '''\n    Fill out the item numbers\n    :return:\n    '''\n    # Helper that decrements a list index by one\n    sub = lambda x: x-1\n\n    for qdsy in qd_no_defill.keys():\n        # qdsyup temporarily holds the list index while searching upwards; initialized to the current list index\n        qdsyup = qdsy\n        # Number of the current list entry\n        qdno = df.loc[qdsy, 'no']\n        # Numbers of the parent list entries, initialized to the current number\n        no = [str(qdno)]\n        n = df.loc[qdsy, 'level']\n        while n > 1:\n            qdsyup = sub(qdsyup)\n            qdupl = df.loc[qdsyup, 'level']\n            if qdupl < n:\n                n -= 1\n            if (qdupl == n) and (df.loc[qdsyup, 'zmKind'] != 10 ):\n                no.append(str(df.loc[qdsyup, 'no']))\n        nofill = no[::-1]\n        if len(nofill) == 0:\n            df.loc[qdsy, 'nofill'] = ''\n        else:\n            df.loc[qdsy, 'nofill'] = '-'.join(nofill)\n    return df\n\n\ndef same_qd(df, qd_de_defill):\n    '''\n    Quotas attached to identical items\n    :param df:\n    :param qd_de_defill: dict of item index -> attached quota indices\n    :return: dataframe\n    '''\n    returnData = pd.DataFrame([], columns=['no', 'name', 'mark', 'unit', 'unitPrice',\n                                           'costfileName', 'editTime', 'roadLevel', 'workSpale'])\n    # key: item index, value: item name\n    qd_name = {}\n    for qdsy in qd_de_defill.keys():\n        qd_name[qdsy] = df.loc[qdsy, 'name']\n    # key: item name, value: item indices (same item name -> item indices)\n    name_qd = {}\n    for qdsy, name in qd_name.items():\n        name_qd.setdefault(name, []).append(qdsy)\n\n    # key: item index, value: quota numbers\n    qdsy_deno = {}\n    for k, v in qd_de_defill.items():\n        qdsy_deno[k] = []\n        for vi in v:\n            qdsy_deno[k].append(df.loc[vi, 'no'])\n    # key: quota numbers, value: list of item indices sharing the same quotas\n    deno_qdsylist = {}\n    for k, v in qdsy_deno.items():\n        deno_qdsylist.setdefault(tuple(v), []).append(k)\n    # Drop duplicates with identical quotas; key: item name, value: item indices\n    qdname_qdsy = {}\n    for kname, vqdsy in name_qd.items():\n        qdname_qdsy[kname] = []\n        for v in deno_qdsylist.values():\n            if set(vqdsy) & set(v):\n                if len(v) > 1:\n                    vs = []\n                    for vi in v:\n                        if df.loc[vi, 'name'] == kname:\n                            vs.append(vi)\n                    qdname_qdsy[kname].append(vs[0])\n                else:\n                    qdname_qdsy[kname].extend(v)\n\n    # For each duplicated item name, look up the corresponding qdsy\n    for qdname in qdname_qdsy.keys():\n        # For each qdname, get the corresponding list of qdsy\n        qdsylist = qdname_qdsy[qdname]\n        for i in range(len(qdsylist)):\n            qdsy = qdsylist[i]\n            desys = qd_de_defill[qdsy]\n            qd = [df.loc[qdsy, 'nofill'], df.loc[qdsy, 'name'], df.loc[qdsy, 'mark'], df.loc[qdsy, 'unit'], df.loc[qdsy, 'unitPrice'],\n                  df.loc[qdsy, 'costfileName'], df.loc[qdsy, 'editTime'], df.loc[qdsy, 'roadLevel'],\n                  df.loc[qdsy, 'workSpale']]\n            returnData.loc[str(qdname) + '@项%d' % (i + 1), :] = qd\n\n            for j in range(len(desys)):\n                desy = desys[j]\n                de = [df.loc[desy, 'no'], df.loc[desy, 'name'], df.loc[desy, 'mark'], df.loc[desy, 'unit'], df.loc[desy, 'unitPrice'],\n                      df.loc[desy, 'costfileName'], df.loc[desy, 'editTime'], df.loc[desy, 'roadLevel'],\n                      df.loc[desy, 'workSpale']]\n                returnData.loc[str(qdname) + '@项%d套定额%d' % (i + 1, j + 1), :] = de\n\n    return returnData\n\n\nif __name__ == '__main__':\n    path = r'D:\\toone\\项目案例\\201902概预算\\第二次\\概预算'\n    to_path = r'D:\\toone\\项目案例\\201902概预算\\第二次\\概预算\\结果v2'\n    data = readJson(path, '概预算.json')\n    data.loc[data['mark'] == \"计算项\", 'zmKind'] = 10\n    print('Program start time: %s' % time.asctime())\n    # data.head(2000).to_excel(to_path + '\data.xlsx')\n    data = data[data['costfileName'].str.contains('预')]\n    data = data[data['zmKind'].isin([10, 50])]\n    data['name'].fillna('', inplace=True)\n    data['roadLevel'].fillna('空', inplace=True)\n    data['workSpale'].fillna('空', inplace=True)\n\n    for m in list(set(data['roadLevel'])):\n        writer = pd.ExcelWriter(to_path + '\%s.xlsx' % m, engine='openpyxl')\n        for n in list(set(data['workSpale'])):\n            datadf = data[(data['roadLevel'] == m) & (data['workSpale'] == n)]\n            if datadf.empty:\n                continue\n            else:\n                df = datadf.reset_index()\n                qddefill = qd_de_mix(df)\n                qdnofill = qd_no_fill(df, qddefill)\n                result = same_qd(qdnofill, qddefill)\n                result.to_excel(writer, sheet_name='%s' % n)\n        writer.close()\n    print('Program finish time: %s' % time.asctime())\n\n\n","repo_name":"KingJoan/projectExample","sub_path":"201902yusuan/probudget.py","file_name":"probudget.py","file_ext":"py","file_size_in_byte":6614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12025667379","text":"from src.read.reader_base import ReaderBase\nfrom src.alert.tickers import alerters\n\n\nclass Profile(ReaderBase):\n    PIN_EMOJI_UNICODE = u'\\U0001F4CD'\n    INTERESTING_KEYS = ['website', 'phone', 'email', 'facebook', 'linkedin', 'twitter', 'businessDesc']\n\n    @staticmethod\n    def get_nested_keys():\n        return {'officers': [list, dict, 'name'],\n                'premierDirectorList': [list, dict, 'name'],\n                'standardDirectorList': [list, dict, 'name'],\n                'auditors': [list, dict, 'name'],\n                'investorRelationFirms': [list, dict, 'name'],\n                'legalCounsels': [list, dict, 'name'],\n                'investmentBanks': [list, dict, 'name'],\n                'corporateBrokers': [list, dict, 'name'],\n                'notes': [list],\n                'otherSecurities': [list, dict, 'name'],\n                'otcAward': [dict, 'best50'],\n                \"indexStatuses\": [list, dict, 'indexName'],\n                }\n\n    @staticmethod\n    def get_drop_keys():\n        return ['securities', 'isProfileVerified', 'isCaveatEmptor', 'isShell', 'isBankrupt', 'unableToContact',\n                'isDark', 'numberOfRecordShareholders', 'profileVerifiedAsOfDate', 'tierCode', 'tierStartDate',\n                'estimatedMarketCapAsOfDate', 'estimatedMarketCap']\n\n    def generate_info(self, exclude=None, escape_markdown=False):\n        \"\"\"\n        :param escape_markdown:\n        :param exclude: List of keys to exclude\n        \"\"\"\n        features = [alerters.Profile.format_address(self.get_latest(), is_paddding=True)] + \\\n                   [value for key, value in self.get_latest().items() if key in set(self.INTERESTING_KEYS).difference(set(exclude))]\n        msg = f'{self.PIN_EMOJI_UNICODE} ' + f'\\n{self.PIN_EMOJI_UNICODE} '.join(features)\n\n        return self.escape_markdown(msg) if escape_markdown else 
msg\n","repo_name":"roihala/stocker","sub_path":"src/read/readers/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"2347927160","text":"from sklearn.feature_extraction import image\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import asarray\nfrom numpy import savez_compressed, savez\n\np = 12\n\n# one_image = Image.open(\"LC08_L1TP_184029_20160616_20170324_01_T1_B4.tif\")\none_image = Image.open(\"LC08_L1TP_184029_20170806_20170813_01_T1_B4.TIF\")\n\n#one_image = one_image.resize((2800,2800), Image.NEAREST)\none_image = np.array(one_image)\n# A = one_image[1500:6000, 1500:6000]\nA = one_image[2200:2500, 3350:3650]\n#A=one_image\n# A = (A-np.amin(A))/(np.amax(A)-np.amin(A))\n\nplt.gray()\nplt.imshow(A)\nplt.show()\n\n[L1, L2] = np.shape(A)\nl1 = int(L1//p)\nl2 = int(L2//p)\n\n\n# for i in range(l1):\n# for j in range(l2):\n# patch = A[i*p:(i+1)*p, j*p:(j+1)*p]\n# patch = (patch - patch.min()) / (patch.max() - patch.min())\n# np.save('./patches_inainte_alunecare_12x12/patch'+str(i)+'_'+str(j)+'.npy', patch)\n\n# B = np.load('./patches/patch0_0.npy')\n\n'''\nprint(L1, L2)\nprint(l1,l2)\n\nL1 = 100\nL2 = 100\n'''\n\nfor i in range(int(p/2),L1-int(p/2),2):\n for j in range(int(p/2),L2-int(p/2),2):\n #print(i-p/2, i+p/2, j-p/2, j+p/2)\n patch = A[int(i-p/2):int(i+p/2), int(j-p/2):int(j+p/2)]\n patch = (patch - patch.min()) / (patch.max() - patch.min())\n np.save('./patches_img300x300_dupa_12x12/patch'+str(i)+'_'+str(j)+'.npy', patch)\n\n\n# print('Image shape: {}'.format(one_image.shape))\n# print('Image type: {}'.format(type(one_image)))\n\n# patches = image.extract_patches_2d(one_image, (28, 28))\n\n# print('Patches shape: {}'.format(patches.shape))\n# print('Patches len: {}'.format(len(patches)))\n\n#print(patches[0])\n#print(patches[800])\n'''\nfor i in range(len(patches)):\n data = asarray(patches[i])\n savez_compressed('patch'+str(i)+'.npz', data)\n'''\n","repo_name":"marinabarbu/Autoencoder","sub_path":"extract_patches_img_satelitare.py","file_name":"extract_patches_img_satelitare.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22167712508","text":"import sys\r\n\r\nfirst_line = False\r\ngrid = []\r\nswats = []\r\n\r\nfor line in sys.stdin:\r\n if not first_line:\r\n a,b,c = list(map(int,line.split()))\r\n first_line = True\r\n else:\r\n grid.append(list(line.strip()))\r\n swats.append(([0]*b).copy())\r\n\r\nmaxI, maxJ, maxSwats = 0,0,0\r\nfor i in range(a-c+1):\r\n for j in range(b-c+1):\r\n swat = 0\r\n for k in range(1,c-1):\r\n for l in range(1,c-1):\r\n if grid[i+k][j+l] == \"*\":\r\n swat += 1\r\n if swat > maxSwats:\r\n maxSwats = swat\r\n maxI, maxJ = i,j\r\n\r\nprint(maxSwats)\r\nfor i in range(1,c-1):\r\n grid[maxI][maxJ+i] = \"-\"\r\n grid[maxI+c-1][maxJ+i] = \"-\"\r\n grid[maxI+i][maxJ] = \"|\"\r\n grid[maxI+i][maxJ+c-1] = \"|\"\r\ngrid[maxI][maxJ] = \"+\"\r\ngrid[maxI+c-1][maxJ] = \"+\"\r\ngrid[maxI][maxJ+c-1] = \"+\"\r\ngrid[maxI+c-1][maxJ+c-1] = \"+\"\r\n\r\nfor r in grid:\r\n print(\"\".join(r))","repo_name":"RussellDash332/kattis","sub_path":"src/Prozor/prozor.py","file_name":"prozor.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"40"} +{"seq_id":"38557901488","text":"# Below are the import 
statements \r\n\r\nfrom ibapi.wrapper import *\r\nfrom ibapi.client import *\r\nfrom ibapi.contract import *\r\nfrom ibapi.order import *\r\nfrom threading import Thread\r\nimport queue\r\nimport datetime\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nfrom random import randint\r\n\r\n# Below are the global variables\r\n\r\ntickers = [\r\n\"amazon\",\r\n\"shopify\",\r\n\"the\",\r\n\"square\",\r\n\"mercadolibre\",\r\n\"paypal\",\r\n\"twilio\",\r\n\"mongodb\",\r\n\"arista\",\r\n\"netflix\",\r\n\"apple\",\r\n\"facebook\",\r\n\"paycom\",\r\n\"etsy\",\r\n\"mastercard\",\r\n\"teladoc\",\r\n\"okta\",\r\n\"veeva\",\r\n\"appian\",\r\n\"docusign\",\r\n\"hubspot\",\r\n\"alteryx\",\r\n\"tesla\",\r\n\"walt\",\r\n\"roku\",\r\n\"salesforce.com\",\r\n\"alphabet\",\r\n\"redfin\",\r\n\"nvidia\",\r\n\"datadog\",\r\n\"zscaler\",\r\n\"pinterest\",\r\n\"crowdstrike\",\r\n\"starbucks\",\r\n\"zoom\",\r\n\"axon\",\r\n\"activision\",\r\n\"atlassian\",\r\n\"adobe\",\r\n\"american\",\r\n\"intuitive\",\r\n\"tencent\",\r\n\"take-two\",\r\n\"cloudflare,\",\r\n\"wix.com\",\r\n\"stitch\",\r\n\"fastly\",\r\n\"ulta\",\r\n\"align\",\r\n\"cognex\",\r\n\"workday\",\r\n\"jd.com\",\r\n\"illumina\",\r\n\"fiverr\",\r\n\"monster\",\r\n\"blackline,\",\r\n\"autodesk\",\r\n\"markel\",\r\n\"chipotle\",\r\n\"peloton\",\r\n\"servicenow,\",\r\n\"zendesk\",\r\n\"irobot\",\r\n\"accenture\",\r\n\"skyworks\",\r\n\"match\",\r\n\"proto\",\r\n\"upstart\",\r\n\"sea\",\r\n\"twitter\",\r\n\"editas\",\r\n\"iqiyi\",\r\n\"ipg\",\r\n\"berkshire\",\r\n\"upwork\",\r\n\"idexx\",\r\n\"zillow\",\r\n\"costco\",\r\n\"guardant\",\r\n\"electronic\",\r\n\"splunk\",\r\n\"novocure\",\r\n\"mccormick\",\r\n\"equinix\",\r\n\"nutanix\",\r\n\"masimo\",\r\n\"tractor\",\r\n\"ubiquiti\",\r\n\"airbnb,\",\r\n\"zynga\",\r\n\"healthequity\",\r\n\"invitae\",\r\n\"abiomed\",\r\n\"unity\",\r\n\"varonis\",\r\n\"fortinet\",\r\n\"lululemon\",\r\n\"live\",\r\n\"interactive\",\r\n\"aerovironment\",\r\n\"ii-vi\",\r\n\"lam\",\r\n\"axos\",\r\n\"nike\",\r\n\"nextera\",\r\n\"moderna\",\r\n\"new\",\r\n\"bumble\",\r\n\"jack\",\r\n\"dexcom\",\r\n\"asml\",\r\n\"baozun\",\r\n\"ss&c\",\r\n\"planet\",\r\n\"booking\",\r\n\"palo\",\r\n\"ionis\",\r\n\"gilead\",\r\n\"wingstop\",\r\n\"trex\",\r\n\"lemonade,\",\r\n\"coupang,\",\r\n\"baidu\",\r\n\"intuit\",\r\n\"bluebird\",\r\n\"dassault\",\r\n\"xilinx\",\r\n\"beyond\",\r\n\"kinder\",\r\n\"rollins\",\r\n\"hca\",\r\n\"intercontinental\",\r\n\"texas\",\r\n\"five\",\r\n\"fedex\",\r\n\"hasbro\",\r\n\"vail\",\r\n\"middleby\",\r\n\"bilibili\",\r\n\"logitech\",\r\n\"synopsys\",\r\n\"fubotv,\",\r\n\"gartner\",\r\n\"cintas\",\r\n\"westinghouse\",\r\n\"williams-sonoma\",\r\n\"skechers\",\r\n\"balchem\",\r\n\"freshpet\",\r\n\"boston\",\r\n\"wayfair\",\r\n\"coupa\",\r\n\"hyatt\",\r\n\"nintendo\",\r\n\"uber\",\r\n\"roblox\",\r\n\"ollie's\",\r\n\"goodrx\",\r\n\"copart\",\r\n\"godaddy\",\r\n\"liveperson\",\r\n\"hello\",\r\n\"netease\",\r\n\"rh\",\r\n\"sirius\",\r\n\"sleep\",\r\n\"factset\",\r\n\"alaska\",\r\n\"zebra\",\r\n\"ebay\",\r\n\"alkermes\",\r\n\"alnylam\",\r\n\"amgen\",\r\n\"broadcom\",\r\n\"blackbaud\",\r\n\"first\",\r\n\"chart\",\r\n\"lkq\",\r\n\"mastec\",\r\n\"nice\",\r\n\"nuvasive\",\r\n\"novo\",\r\n\"resmed\",\r\n\"seagen\",\r\n\"shockwave\",\r\n\"vertex\",\r\n\"biogen\",\r\n\"exelixis\",\r\n\"blackberry\",\r\n\"svb\",\r\n\"western\",\r\n\"waste\",\r\n\"biomarin\",\r\n\"caseys\",\r\n\"2u\",\r\n\"anheuser-busch\",\r\n\"unitedhealth\",\r\n\"core\",\r\n\"t-mobile\",\r\n\"criteo\",\r\n\"under\",\r\n\"cboe\",\r\n\"cme\",\r\n\"littelfuse\
",\r\n\"old\",\r\n\"grupo\",\r\n\"rpm\",\r\n\"3m\",\r\n\"camping\",\r\n\"cummins\",\r\n\"emergent\",\r\n\"watsco\",\r\n\"oceaneering\",\r\n\"cvs\",\r\n\"marriott\",\r\n\"sherwin-williams\",\r\n\"the\",\r\n\"staar\",\r\n\"nxp\",\r\n\"transdigm\",\r\n\"textron\",\r\n]\r\n\r\n# Global variables for the scraper \r\n\r\ntextContent = [] # Holds the headlines in an array \r\ncycleCount = 0 # Stores the frequency of requests made to the server \r\ntempHeadlineHolder = [] # Array to hold the most recent headline before it has been analyzed\r\n\r\n# Below are the custom classes and methods \r\n\r\ndef economistSearch():\r\n page_link = 'https://www.economist.com/' # Page Url to point request where to crawl \r\n page_response = requests.get(page_link, timeout=20) # Get request to ask for page content\r\n page_content = BeautifulSoup(page_response.content, \"html.parser\") # Ask Beautiful soup to parse for content\r\n\r\n for link in page_content.find_all(\"span\", class_=\"flytitle-and-title__title\", limit = 30): # Finds all the spans with the class flytitle-and-title__title\r\n if link.text not in textContent:\r\n # print(link.text) # Prints the title so we can verify correct operation \r\n textContent.append(link.text) # Appends the headline to our main array \r\n tempHeadlineHolder.append(link.text) \r\n\r\n print(\"Economist Done\")\r\n time.sleep(5) # Creates a crawl delay of 5 seconds (which the Economist requires in their robots.txt file)\r\n\r\ndef CNNSearch():\r\n page_link = 'https://www.cnn.com/specials/last-50-stories' # Page Url to point request where to crawl \r\n page_response = requests.get(page_link, timeout=20) # Get request to ask for page content\r\n page_content = BeautifulSoup(page_response.content, \"html.parser\") # Ask Beautiful soup to parse for content\r\n\r\n for link in page_content.find_all(\"span\", class_=\"cd__headline-text\", limit = 30): # Finds all the spans with the class cd__headline-text\r\n if link.text not in textContent:\r\n # print(link.text) # Prints the title so we can verify correct operation \r\n textContent.append(link.text) # Appends the headline to our main array \r\n tempHeadlineHolder.append(link.text) \r\n print(\"CNN Done\")\r\n\r\ndef ReutersSearch():\r\n page_link = 'https://www.reuters.com/' # Page Url to point request where to crawl \r\n page_response = requests.get(page_link, timeout=20) # Get request to ask for page content\r\n page_content = BeautifulSoup(page_response.content, \"html.parser\") # Ask Beautiful soup to parse for content\r\n\r\n for link in page_content.find_all(\"h3\", class_=\"article-heading\", limit = 30): # Finds h3's with the class article-heading\r\n if link.text not in textContent:\r\n # print(link.text) # Prints the title so we can verify correct operation \r\n textContent.append(link.text) # Appends the headline to our main array \r\n tempHeadlineHolder.append(link.text) \r\n print(\"Reuters Done\")\r\n\r\ndef AlphaSearch():\r\n page_link = 'https://seekingalpha.com/market-news/all' # Page Url to point request where to crawl \r\n page_response = requests.get(page_link, timeout=20) # Get request to ask for page content\r\n page_content = BeautifulSoup(page_response.content, \"html.parser\") # Ask Beautiful soup to parse for content\r\n\r\n for link in page_content.find_all(\"div\", class_=\"media-body\", limit = 30): # Finds divs with the class media-body\r\n if link.div.a.text not in textContent: # Navigates down into the div to get the content in the link and checks if we have seen it before\r\n 
textContent.append(link.div.a.text) # Appends the title to our master so we can track if we have seen it before \r\n            tempHeadlineHolder.append(link.div.a.text) # Appends the title to our temporary holder which has been cleared after the last unique title\r\n    print(\"Seeking Alpha Done\")\r\n\r\n\t\r\n# Below is the logic processing area\r\n\r\ndef headlineAnalysis(headline):\r\n\r\n    # Splits the headline so we can look for individual word matches\r\n    words = headline.split()\r\n\r\n    # A simple count to track if the headline contains our keywords\r\n    matchScore = 0\r\n\r\n    # Iterates over the words in the headline and looks for word matches \r\n    for individualWord in words: \r\n        if individualWord.lower() in tickers:\r\n            print(individualWord.lower())\r\n\r\n\r\n#################\r\n\r\n\r\neconomistSearch() # Calls our main scraping method \r\nCNNSearch()\r\nReutersSearch()\r\nAlphaSearch() \r\nprint(\"Search Done at: \" + str(datetime.datetime.now())) # An optional printout to keep track of how many times the program has run \r\n\r\n# You can append text here to test the algorithm's response to certain cases \r\n#if cycleCount == 12: \r\n\t#tempHeadlineHolder.append(\"Apple exceeds expectations in the latest quarter\")\r\n\r\n# A loop that cycles through our temporary headline holder\r\nfor headline in tempHeadlineHolder:\r\n\tprint(headline)\r\n\theadlineAnalysis(headline)\r\n\r\ntempHeadlineHolder = [] # Reset the headline holder after we have searched the content to avoid repeats \r\n\r\n# (Optional wait time that may be necessary for websites with a crawl delay or bot monitors) \r\ntime.sleep(randint(0,5)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"reubencapio/swingtrading","sub_path":"SentimentAnalysisAndOrdering.py","file_name":"SentimentAnalysisAndOrdering.py","file_ext":"py","file_size_in_byte":7934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14314827849","text":"# noinspection PyUnresolvedReferences\r\nimport pgzrun, random, pygame\r\n\r\nWIDTH = 1200\r\n\r\nHEIGHT = 620\r\n\r\nmusic.play('default')\r\n\r\nchar = Actor('char_0_idle')\r\nchar.x = 600\r\nchar.y = 570\r\n\r\nchar_0 = Actor('char_0_idle')\r\nchar_0.x = 150\nchar_0.y = 180\r\n\r\nchar_1 = Actor('char_1_idle')\r\nchar_1.x = 350\r\nchar_1.y = 180\r\n\r\nchar_2 = Actor('char_2_idle')\r\nchar_2.x = 550\r\nchar_2.y = 180\r\nbounce = False\r\n\r\n\r\nchar_3 = Actor('char_3_idle')\r\nchar_3.x = 750\r\nchar_3.y = 180\r\nc_char_3 = False\r\n\r\n\r\nchar_4 = Actor('char_4_idle')\nchar_4.x = 950\r\nchar_4.y = 180\r\n\r\nfish = Actor('fish')\r\nstore = []\r\n\r\n\r\n\r\n#start screen var\r\nstart_screen = True\r\n#if we want to run our main game code\r\nrun = False\r\n#character select screen\r\ncharacter_menu = False\r\n#music select screen\r\nmusic_screen = False\r\n\r\n\r\nstate = 0\r\n\r\nreset = 0\r\n\r\nstate_images = [\r\n    \"right\",\r\n    \"left\"\r\n]\r\n\r\n#variables that deal with player movement\r\nisJump = False\r\njumpCount = 10\r\nleft = False\r\nright = True\r\nvel = 10\r\n#variables for the fish\r\nfall = 5\r\ntime_delay = 1000\r\n#whether to drop a fish or not\r\ndrop = False\r\n#start of fish drop\r\nstart = True\r\n#players score\r\nscore = 0\r\n#amount of player lives\r\nlives = 5\r\n#checks to end the game\r\nend_game = False\r\n#Time delay to Jump again\r\ndelay = 500\r\n\r\n#Jump variable 1\r\nj_var1 = 0.7\r\nj_var2 = 0.9\r\n\r\ntitle = 'cat hopper'\r\n#start button\r\nstart_button = Actor('start')\r\nstart_button.x = 600\r\nstart_button.y = 240\r\n\r\n#go again button\r\ncont_button = Actor('again')\r\ncont_button.x = 600\r\ncont_button.y = 50\r\n\r\n#characters button\r\ncharacter_select = Actor('characters')\r\ncharacter_select.x = 600\r\ncharacter_select.y = 300\r\n\r\n#back button\r\nback = Actor('back')\r\nback.x = 100\r\nback.y = 40\r\n\r\n#music_button\r\nmusic_button = Actor('music')\r\nmusic_button.x = 600\r\nmusic_button.y = 360\r\n\r\n#button\r\ndefault_music = Actor('default')\r\ndefault_music.x = 150\r\ndefault_music.y = 250\r\n\r\n#button\r\nmusic_0 = Actor('music_0')\r\nmusic_0.x = 150\r\nmusic_0.y = 300\r\n\r\ndef place_fish():\r\n    fish.x = random.randint(50, 1150)\r\n\r\ndef on_mouse_down(pos):\r\n    global char, bounce, run, start_screen, lives, vel, end_game, score, fall, start, character_menu, char_0, char_1, state_images, char_2, music_screen, c_char_3, store, j_var1, j_var2, delay\r\n\r\n    if start_button.collidepoint(pos):\r\n        run = True\r\n        start_screen = False\r\n\r\n    if cont_button.collidepoint(pos):\r\n        run = False\r\n        start_screen = True\r\n        lives = 5\r\n        vel = 10\r\n        end_game = False\r\n        score = 0\r\n        fall = 5\r\n        start = True\r\n        store = []\r\n\r\n#Character select screen press\r\n    if character_select.collidepoint(pos):\r\n        run = False\r\n        start_screen = False\r\n        music_screen = False\r\n        character_menu = True\r\n#pressing in the music button\r\n    if music_button.collidepoint(pos):\r\n        run = False\r\n        start_screen = False\r\n        character_menu = False\r\n        music_screen = True\r\n#pressing on the Music option buttons\r\n    if default_music.collidepoint(pos):\r\n        music.play('default')\r\n    if music_0.collidepoint(pos):\r\n        music.play('music_0')\r\n\r\n# pressing on the char0 Icon\r\n    if char_0.collidepoint(pos):\r\n        char = Actor('char_0_idle')\r\n        state_images = [\r\n            \"right\",\r\n            \"left\"\r\n        ]\r\n        bounce = False\r\n        c_char_3 = False\r\n        j_var1 = 0.7\r\n        j_var2 = 0.9\r\n        delay = 500\r\n# pressing on the char1 Icon\r\n    if char_1.collidepoint(pos):\r\n        char = Actor('char_1_idle')\r\n        state_images = [\r\n            \"1_left\",\r\n            \"1_right\"\r\n        ]\r\n        bounce = False\r\n        c_char_3 = False\r\n        j_var1 = 0.7\r\n        j_var2 = 0.9\r\n        delay = 450\r\n# pressing on the char2 Icon\r\n    if char_2.collidepoint(pos):\r\n        char = Actor('char_2_idle')\r\n        state_images = [\r\n            \"2_right\",\r\n            \"2_left\"\r\n        ]\r\n        bounce = True\r\n        c_char_3 = False\r\n        j_var1 = 0.5\r\n        j_var2 = 1\r\n        delay = 10\r\n# Pressing on char3 Icon\r\n    if char_3.collidepoint(pos):\r\n        char = Actor('char_3_idle')\r\n        state_images = [\r\n            \"3_right\",\r\n            \"3_left\"\r\n        ]\r\n        bounce = False\r\n        c_char_3 = True\r\n        j_var1 = 1\r\n        j_var2 = 0.7\r\n        delay = 1000\r\n# pressing on char4 Icon\r\n    if char_4.collidepoint(pos):\r\n        char = Actor(\"char_4_idle\")\r\n        state_images = [\r\n            \"4_right\",\r\n            \"4_left\"\r\n        ]\r\n        bounce = False\r\n        c_char_3 = False\r\n        j_var1 = 0.7\r\n        j_var2 = 0.9\r\n        delay = 500\r\n\r\n    if back.collidepoint(pos):\r\n        start_screen = True\r\n        character_menu = False\r\n        music_screen = False\r\n\r\ndef draw():\r\n    if start_screen:\r\n        screen.clear()\r\n        screen.blit('menu_backdrop', (0,0))\r\n        screen.draw.text(title, (0, 0))\r\n        char.draw()\r\n        start_button.draw()\r\n        character_select.draw()\r\n        music_button.draw()\r\n\r\n    if character_menu:\r\n        screen.clear()\r\n        screen.blit('menu_backdrop', (0, 0))\r\n        back.draw()\r\n        #Drawing all the character icons\r\n        char_0.draw()\r\n        char_1.draw()\r\n        char_2.draw()\r\n        char_3.draw()\r\n        char_4.draw()\r\n        char.x = 600\r\n        char.y = 570\r\n    #drawing the Music screen\r\n    if music_screen:\r\n        screen.clear()\r\n        screen.blit('menu_backdrop', (0, 0))\r\n        back.draw()\r\n\r\n        default_music.draw()\r\n        music_0.draw()\r\n\r\n    if run:\r\n        screen.clear()\r\n\r\n        screen.blit('forest', (0, -55))\r\n\r\n        char.image = state_images[state]\r\n        char.draw()\r\n        #drawing the fish\r\n        for i in range(0, len(store)):\r\n            store[i].draw()\r\n\r\n        screen.draw.text(\"Score: \" + str(score), (15, 5))\r\n        screen.draw.text(\"Lives: \" + str(lives), (1020, 5))\r\n        #END GAME MESSAGE\r\n        if end_game:\r\n            screen.draw.text(\"YOU LOSE!\", (540, 310))\r\n            cont_button.draw()\r\n\r\n\r\n\r\ndef update():\r\n    global state, neg, jumpCount, isJump, vel, left, right, reset, jumpReady, time_delay, drop, start, score, \\\r\n        fall, lives, end_game, boost, is_boost, boost_end, boss_fight, char, run, state_images, store\r\n\r\n#move right\r\n    if run:\r\n        if keyboard.d and char.x < 1140:\r\n            state = 0\r\n            char.x += vel\r\n            right = True\r\n            left = False\r\n        #move left\r\n        if keyboard.a and char.x > 50:\r\n            state = 1\r\n            char.x -= vel\r\n            right = False\r\n            left = True\r\n        #jump code\r\n\r\n        if not(isJump) and pygame.time.get_ticks() - reset > delay:\r\n            if keyboard.SPACE:\r\n                isJump = True\r\n                right = False\r\n                left = False\r\n                jump = True\r\n                if not c_char_3:\r\n                    vel = 15\r\n                if c_char_3:\r\n                    vel = 26\r\n                reset = pygame.time.get_ticks()\r\n        if isJump:\r\n            if jumpCount >= -10:\r\n                neg = 1\r\n                if jumpCount < 0:\r\n                    neg = -1\r\n                char.y -= (j_var1*jumpCount ** 2) * j_var2 * neg\r\n                jumpCount -= 1\r\n            else:\r\n                isJump = False\r\n                if not c_char_3:\r\n                    vel = 10\r\n                if c_char_3:\r\n                    vel = 7\r\n                jumpCount = 10\r\n\r\n        #Fish spawner\r\n        if start:\r\n            store.append(fish)\r\n            place_fish()\r\n            drop = True\r\n            start = False\r\n        if drop:\r\n            fish.y += fall\r\n        #Getting rid of fish if they hit screen border\r\n\r\n        if fish.y > 620:\r\n            store.pop(0)\r\n            drop = False\r\n            start = True\r\n            fish.y = 0\r\n            lives = lives-1\r\n            if not c_char_3:\r\n                sounds.miss.play()\r\n            else:\r\n                sounds.miss2.play()\r\n        #Hit detection for Fish and Cat\r\n        for i in range(len(store)):\r\n            if char.colliderect(store[i]):\r\n                store.pop(i)\r\n                drop = False\r\n                start = True\r\n                fish.y = 0\r\n                score = score+1\r\n                break\r\n        #Char Lives\r\n        if lives == 0:\r\n            start = False\r\n            drop = False\r\n            end_game = True\r\n        #levels of the game\r\n        if score > 10:\r\n            fall = 6\r\n        if score > 20:\r\n            fall = 7.5\r\n        if score > 30:\r\n            fall = 8\r\n        if score > 40:\r\n            fall = 9\r\n        if score > 50:\r\n            fall = 10\r\n\r\n\r\n\r\npgzrun.go()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Samuel-Carroll/First-game-pygame","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":8609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39889271260","text":"import matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport numpy as np\nimport sys\n\nif len(sys.argv) == 2:\n    folder = 'results-' + str(sys.argv[1])\nelse:\n    folder = '' \n\n#plt.rc('text', usetex=True)\nS = 14\t\t# size of labels\n\ndef plot_A(data, output_file):\n\n    # initial position of point A\n    #A_x = 0.6\n    #A_y = 0.2\n\n    col_Ax = 1\t\t# number of column with x-coordinate of position of A\n    col_Ay = 2 \t\t# number of column with y-coordinate of position of A\n\n    fig = plt.figure()\n    plot_Ax = fig.add_subplot(211)\n    plot_Ay = fig.add_subplot(212)\n\n    plot_Ax.plot(data[:, 0], data[:, col_Ax])# - A_x)\n    plot_Ay.plot(data[:, 0], data[:, col_Ay])# - A_y)\n\n    plot_Ax.set_ylabel('displacement $x$', size=S)\n    
plot_Ay.set_ylabel('displacement $y$', size=S)\n plot_Ax.set_xlabel('time', size=S)\n plot_Ay.set_xlabel('time', size=S)\n\n #plot_Ax.set_ylim(range)\n\n fig.savefig(output_file, bbox_inches='tight')\n fig.clf()\n\n\ndata_file = folder+'/data.csv'\ndata = np.genfromtxt(data_file, delimiter=';', skip_header=1)\nplot_A(data, folder+'/A_position.png')\n\nN = 4*len(data[:, 0])//5\nend_data = data[N:, :]\nplot_A(end_data, folder+'/end_A_position.png')\n","repo_name":"VojtechKubac/FSI_seminar","sub_path":"CSM_benchmark/displacement_plotter.py","file_name":"displacement_plotter.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"42761973214","text":"\"\"\"Manage Game Window\"\"\"\n\nfrom datetime import datetime\nfrom PyQt5.QtWidgets import QWidget, QMessageBox\n\nfrom src.main.db.dao.game_dao import GameDao as dao\nfrom src.main.db.model.game import Game\nfrom src.main.exceptions import bad_manage_window_type, gather_data_exception, \\\n game_already_exists_exception, incorrect_data_exception\nfrom src.resources.ui import manage_game\n\n\nclass ManageGame(QWidget, manage_game.Ui_Form):\n \"\"\"\n Class is responsible for application manage game window view.\n Makes it possible to add or edit a game.\n \"\"\"\n\n def __init__(self, bgt_window, window_type, *, game_name=None):\n \"\"\"\n Initializes a ManageGameWindow.\n\n :param bgt_window: main window view class.\n :param window_type: type of window to show; possible: 'ADD', 'EDIT'.\n :param game_name: name of the game to edit; available in 'EDIT' window_type.\n \"\"\"\n\n super().__init__()\n\n self.bgt_window = bgt_window\n\n self.setupUi(self)\n self.showMaximized()\n self.setWindowTitle(\"Board Game Timer - Manage Game Window\")\n\n if window_type == \"ADD\":\n self.add_game_setup_ui()\n elif window_type == \"EDIT\":\n self.edit_game_setup_ui(game_name)\n else:\n raise bad_manage_window_type.BadManageWindowTypeException\n\n self.cancelButton.clicked.connect(self.cancel)\n\n def add_game_setup_ui(self):\n \"\"\"\n Setup ui for 'ADD' window type.\n\n :return:\n \"\"\"\n\n self.manageGameLabel.setText(\"ADD GAME\")\n self.manageGameButton.setText(\"ADD GAME\")\n self.manageGameButton.clicked.connect(self.add_game)\n\n def edit_game_setup_ui(self, game_name):\n \"\"\"\n Setup ui for 'EDIT' window type.\n\n :param game_name:\n :return:\n \"\"\"\n\n game: Game = dao.get_game_by_name(game_name)\n\n self.gameNameLineEdit.setText(game_name)\n self.gameNameLineEdit.setDisabled(True)\n self.minPlayersSpinBox.setValue(int(game.min_players))\n self.maxPlayersSpinBox.setValue(int(game.max_players))\n self.roundTimeEdit.setTime(datetime.strptime(game.round_time, '%M:%S').time())\n self.gameTimeEdit.setTime(datetime.strptime(game.game_time, '%M:%S').time())\n self.gameTypeComboBox.setCurrentText(game.game_type)\n\n self.manageGameLabel.setText(\"EDIT GAME\")\n self.manageGameButton.setText(\"EDIT GAME\")\n self.manageGameButton.clicked.connect(self.update_game)\n\n def add_game(self):\n \"\"\"\n Pass request to dao for adding currently defined game.\n\n :return:\n \"\"\"\n\n try:\n game = self.gather_data()\n\n dao.add_game(game)\n\n self.exit_window()\n\n except gather_data_exception.GatherDataException:\n print(\"Error while adding new game. Raised GatherDataException!\")\n except game_already_exists_exception.GameAlreadyExistsException:\n print(\"Error while adding new game. 
Raised GameAlreadyExistsException!\")\n self.show_incorrect_data_message_box(\"Game of that name already exists. Choose another \"\n \"name of the game or edit the existing one\")\n\n def update_game(self):\n \"\"\"\n Pass request to dao for updating currently defined game.\n\n :return:\n \"\"\"\n\n try:\n game = self.gather_data()\n\n dao.update_game(game)\n\n self.exit_window()\n\n except gather_data_exception.GatherDataException:\n print(\"Error while editing a game. Raised GatherDataException!\")\n\n def gather_data(self) -> Game:\n \"\"\"\n Gathers data currently set in ui form.\n\n :return: gathered data mapped to Game object.\n \"\"\"\n\n try:\n game_name = self.get_game_name()\n min_players = self.get_min_players()\n max_players = self.get_max_players(min_players)\n round_time = self.get_round_time()\n game_time = self.get_game_time()\n game_type = self.get_game_type()\n except incorrect_data_exception.IncorrectDataException as inc_data:\n print(\"Raised an IncorrectDataException!\")\n raise gather_data_exception.GatherDataException from inc_data\n\n game = Game(game_name, min_players, max_players, round_time, game_time, game_type)\n\n return game\n\n def get_game_name(self):\n \"\"\"\n Gather currently set game name in ui form.\n\n :return: got game name\n \"\"\"\n\n game_name = self.gameNameLineEdit.text()\n\n reply = None\n if not isinstance(game_name, str):\n reply = self.show_incorrect_data_message_box(\"Name of a game should be a string.\")\n elif len(game_name) < 1:\n reply = self.show_incorrect_data_message_box(\"Name of a game cannot be empty.\")\n\n if reply == QMessageBox.Ok:\n raise incorrect_data_exception.IncorrectDataException\n\n return game_name\n\n def get_min_players(self):\n \"\"\"\n Gather currently set minimum number of players in ui form.\n\n :return: got min players value\n \"\"\"\n\n min_players = int(self.minPlayersSpinBox.text())\n return min_players\n\n def get_max_players(self, min_players):\n \"\"\"\n Gather currently set maximum number of players in ui form.\n\n :param min_players: use to assert max players number higher than min players number\n :return: got max players value\n \"\"\"\n\n max_players = int(self.maxPlayersSpinBox.text())\n\n reply = None\n if max_players < min_players:\n reply = self.show_incorrect_data_message_box(\"Number of max players must be higher than\"\n \" a number of min players\")\n\n if reply == QMessageBox.Ok:\n raise incorrect_data_exception.IncorrectDataException\n\n return max_players\n\n def get_round_time(self):\n \"\"\"\n Gather currently set round time in ui form.\n\n :return: got round time\n \"\"\"\n\n round_time = self.roundTimeEdit.text()\n return round_time\n\n def get_game_time(self):\n \"\"\"\n Gather currently set game time in ui form.\n\n :return: got game time\n \"\"\"\n\n game_time = self.gameTimeEdit.text()\n return game_time\n\n def get_game_type(self):\n \"\"\"\n Gather currently set game type in ui form.\n\n :return: got game type\n \"\"\"\n\n game_type = self.gameTypeComboBox.currentText()\n return game_type\n\n def show_incorrect_data_message_box(self, message):\n \"\"\"\n Opens dialog box informing about incorrect provided data.\n\n :param message: message to show\n :return:\n \"\"\"\n\n return QMessageBox.critical(self, \"Incorrect data provided.\", message, QMessageBox.Ok)\n\n def cancel(self):\n \"\"\"\n Returns to Main Window.\n\n :return:\n \"\"\"\n\n self.exit_window()\n\n def exit_window(self):\n \"\"\"\n Exits currently window.\n\n :return:\n \"\"\"\n\n ManageGame.close(self)\n 
self.bgt_window.show()\n","repo_name":"jan-ignatowicz/BoardGameTimerDesktop","sub_path":"src/main/window/manage_game.py","file_name":"manage_game.py","file_ext":"py","file_size_in_byte":7194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11778141649","text":"import time\r\nimport re\r\nfile = open (\"nums1.txt\" , 'r')\r\ntext_file = file.read()\r\nre_text = re.sub(r'[^\\w\\s]','', text_file) # use a regex to strip punctuation from the given sequence (if any is present)\r\n\r\n# print the given text (now without punctuation)\r\nprint(\"number sequence: \", re_text)\r\n# print the count of numbers in the given sequence\r\nnum = len(re_text.split())\r\nprint(\"count of numbers in the sequence:\", num)\r\n\r\nprint(\"Program running time: \", time.process_time(), \"seconds\")\r\n\r\n\r\n","repo_name":"art4iba2/laba-2_remastered","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9737211744","text":"import functools\n\nimport warnings\n\nfrom pymongo import monitoring\nfrom pymongo.collation import (\n    Collation,\n    CollationCaseFirst, CollationStrength, CollationAlternate,\n    CollationMaxVariable)\nfrom pymongo.errors import ConfigurationError\nfrom pymongo.operations import (DeleteMany, DeleteOne, IndexModel, ReplaceOne,\n                                UpdateMany, UpdateOne)\nfrom pymongo.write_concern import WriteConcern\nfrom test import unittest, client_context\nfrom test.utils import EventListener, ignore_deprecations, rs_or_single_client\n\n\nclass TestCollationObject(unittest.TestCase):\n\n    def test_constructor(self):\n        self.assertRaises(TypeError, Collation, locale=42)\n        # Fill in a locale to test the other options.\n        _Collation = functools.partial(Collation, 'en_US')\n        # No error.\n        _Collation(caseFirst=CollationCaseFirst.UPPER)\n        self.assertRaises(TypeError, _Collation, caseLevel='true')\n        self.assertRaises(ValueError, _Collation, strength='six')\n        self.assertRaises(TypeError, _Collation,\n                          numericOrdering='true')\n        self.assertRaises(TypeError, _Collation, alternate=5)\n        self.assertRaises(TypeError, _Collation, maxVariable=2)\n        self.assertRaises(TypeError, _Collation, normalization='false')\n        self.assertRaises(TypeError, _Collation, backwards='true')\n\n        # No errors.\n        Collation('en_US', future_option='bar', another_option=42)\n        collation = Collation(\n            'en_US',\n            caseLevel=True,\n            caseFirst=CollationCaseFirst.UPPER,\n            strength=CollationStrength.QUATERNARY,\n            numericOrdering=True,\n            alternate=CollationAlternate.SHIFTED,\n            maxVariable=CollationMaxVariable.SPACE,\n            normalization=True,\n            backwards=True)\n\n        self.assertEqual({\n            'locale': 'en_US',\n            'caseLevel': True,\n            'caseFirst': 'upper',\n            'strength': 4,\n            'numericOrdering': True,\n            'alternate': 'shifted',\n            'maxVariable': 'space',\n            'normalization': True,\n            'backwards': True\n        }, collation.document)\n\n        self.assertEqual({\n            'locale': 'en_US',\n            'backwards': True\n        }, Collation('en_US', backwards=True).document)\n\n\ndef raisesConfigurationErrorForOldMongoDB(func):\n    @functools.wraps(func)\n    def wrapper(self, *args, **kwargs):\n        if client_context.version.at_least(3, 3, 9):\n            return func(self, *args, **kwargs)\n        else:\n            with self.assertRaises(ConfigurationError):\n                return func(self, *args, **kwargs)\n    return wrapper\n\n\nclass TestCollation(unittest.TestCase):\n\n    
@classmethod\n @client_context.require_connection\n def setUpClass(cls):\n cls.listener = EventListener()\n cls.saved_listeners = monitoring._LISTENERS\n monitoring._LISTENERS = monitoring._Listeners([], [], [], [])\n cls.client = rs_or_single_client(event_listeners=[cls.listener])\n cls.db = cls.client.pymongo_test\n cls.collation = Collation('en_US')\n cls.warn_context = warnings.catch_warnings()\n cls.warn_context.__enter__()\n warnings.simplefilter(\"ignore\", DeprecationWarning)\n\n @classmethod\n def tearDownClass(cls):\n monitoring._LISTENERS = cls.saved_listeners\n cls.warn_context.__exit__()\n cls.warn_context = None\n\n def tearDown(self):\n self.listener.results.clear()\n\n def last_command_started(self):\n return self.listener.results['started'][-1].command\n\n def assertCollationInLastCommand(self):\n self.assertEqual(\n self.collation.document,\n self.last_command_started()['collation'])\n\n @raisesConfigurationErrorForOldMongoDB\n def test_create_collection(self):\n self.db.test.drop()\n self.db.create_collection('test', collation=self.collation)\n self.assertCollationInLastCommand()\n\n # Test passing collation as a dict as well.\n self.db.test.drop()\n self.listener.results.clear()\n self.db.create_collection('test', collation=self.collation.document)\n self.assertCollationInLastCommand()\n\n def test_index_model(self):\n model = IndexModel([('a', 1), ('b', -1)], collation=self.collation)\n self.assertEqual(self.collation.document, model.document['collation'])\n\n @raisesConfigurationErrorForOldMongoDB\n def test_create_index(self):\n self.db.test.create_index('foo', collation=self.collation)\n ci_cmd = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n ci_cmd['indexes'][0]['collation'])\n\n @raisesConfigurationErrorForOldMongoDB\n def test_ensure_index(self):\n self.db.test.ensure_index('foo', collation=self.collation)\n ci_cmd = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n ci_cmd['indexes'][0]['collation'])\n\n @raisesConfigurationErrorForOldMongoDB\n def test_aggregate(self):\n self.db.test.aggregate([{'$group': {'_id': 42}}],\n collation=self.collation)\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n @ignore_deprecations\n def test_count(self):\n self.db.test.count(collation=self.collation)\n self.assertCollationInLastCommand()\n\n self.listener.results.clear()\n self.db.test.find(collation=self.collation).count()\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n def test_count_documents(self):\n self.db.test.count_documents({}, collation=self.collation)\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n def test_distinct(self):\n self.db.test.distinct('foo', collation=self.collation)\n self.assertCollationInLastCommand()\n\n self.listener.results.clear()\n self.db.test.find(collation=self.collation).distinct('foo')\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n def test_find_command(self):\n self.db.test.insert_one({'is this thing on?': True})\n self.listener.results.clear()\n next(self.db.test.find(collation=self.collation))\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n def test_explain_command(self):\n self.listener.results.clear()\n self.db.test.find(collation=self.collation).explain()\n # The collation should be part of the explained command.\n self.assertEqual(\n self.collation.document,\n 
self.last_command_started()['explain']['collation'])\n\n @raisesConfigurationErrorForOldMongoDB\n @client_context.require_version_max(4, 1, 0, -1)\n def test_group(self):\n self.db.test.group('foo', {'foo': {'$gt': 42}}, {},\n 'function(a, b) { return a; }',\n collation=self.collation)\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n def test_map_reduce(self):\n self.db.test.map_reduce('function() {}', 'function() {}', 'output',\n collation=self.collation)\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n def test_delete(self):\n self.db.test.delete_one({'foo': 42}, collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['deletes'][0]['collation'])\n\n self.listener.results.clear()\n self.db.test.delete_many({'foo': 42}, collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['deletes'][0]['collation'])\n\n self.listener.results.clear()\n self.db.test.remove({'foo': 42}, collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['deletes'][0]['collation'])\n\n @raisesConfigurationErrorForOldMongoDB\n def test_update(self):\n self.db.test.update({'foo': 42}, {'$set': {'foo': 'bar'}},\n collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['updates'][0]['collation'])\n\n self.listener.results.clear()\n self.db.test.save({'_id': 12345}, collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['updates'][0]['collation'])\n\n self.listener.results.clear()\n self.db.test.replace_one({'foo': 42}, {'foo': 43},\n collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['updates'][0]['collation'])\n\n self.listener.results.clear()\n self.db.test.update_one({'foo': 42}, {'$set': {'foo': 43}},\n collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['updates'][0]['collation'])\n\n self.listener.results.clear()\n self.db.test.update_many({'foo': 42}, {'$set': {'foo': 43}},\n collation=self.collation)\n command = self.listener.results['started'][0].command\n self.assertEqual(\n self.collation.document,\n command['updates'][0]['collation'])\n\n @raisesConfigurationErrorForOldMongoDB\n def test_find_and(self):\n self.db.test.find_and_modify({'foo': 42}, {'$set': {'foo': 43}},\n collation=self.collation)\n self.assertCollationInLastCommand()\n\n self.listener.results.clear()\n self.db.test.find_one_and_delete({'foo': 42}, collation=self.collation)\n self.assertCollationInLastCommand()\n\n self.listener.results.clear()\n self.db.test.find_one_and_update({'foo': 42}, {'$set': {'foo': 43}},\n collation=self.collation)\n self.assertCollationInLastCommand()\n\n self.listener.results.clear()\n self.db.test.find_one_and_replace({'foo': 42}, {'foo': 43},\n collation=self.collation)\n self.assertCollationInLastCommand()\n\n @raisesConfigurationErrorForOldMongoDB\n def test_bulk_write(self):\n self.db.test.collection.bulk_write([\n DeleteOne({'noCollation': 42}),\n DeleteMany({'noCollation': 42}),\n DeleteOne({'foo': 42}, collation=self.collation),\n DeleteMany({'foo': 42}, 
collation=self.collation),\n ReplaceOne({'noCollation': 24}, {'bar': 42}),\n UpdateOne({'noCollation': 84}, {'$set': {'bar': 10}}, upsert=True),\n UpdateMany({'noCollation': 45}, {'$set': {'bar': 42}}),\n ReplaceOne({'foo': 24}, {'foo': 42}, collation=self.collation),\n UpdateOne({'foo': 84}, {'$set': {'foo': 10}}, upsert=True,\n collation=self.collation),\n UpdateMany({'foo': 45}, {'$set': {'foo': 42}},\n collation=self.collation)\n ])\n\n delete_cmd = self.listener.results['started'][0].command\n update_cmd = self.listener.results['started'][1].command\n\n def check_ops(ops):\n for op in ops:\n if 'noCollation' in op['q']:\n self.assertNotIn('collation', op)\n else:\n self.assertEqual(self.collation.document,\n op['collation'])\n\n check_ops(delete_cmd['deletes'])\n check_ops(update_cmd['updates'])\n\n @raisesConfigurationErrorForOldMongoDB\n def test_bulk(self):\n bulk = self.db.test.initialize_ordered_bulk_op()\n bulk.find({'noCollation': 42}).remove_one()\n bulk.find({'noCollation': 42}).remove()\n bulk.find({'foo': 42}, collation=self.collation).remove_one()\n bulk.find({'foo': 42}, collation=self.collation).remove()\n bulk.find({'noCollation': 24}).replace_one({'bar': 42})\n bulk.find({'noCollation': 84}).upsert().update_one(\n {'$set': {'foo': 10}})\n bulk.find({'noCollation': 45}).update({'$set': {'bar': 42}})\n bulk.find({'foo': 24}, collation=self.collation).replace_one(\n {'foo': 42})\n bulk.find({'foo': 84}, collation=self.collation).upsert().update_one(\n {'$set': {'foo': 10}})\n bulk.find({'foo': 45}, collation=self.collation).update({\n '$set': {'foo': 42}})\n bulk.execute()\n\n delete_cmd = self.listener.results['started'][0].command\n update_cmd = self.listener.results['started'][1].command\n\n def check_ops(ops):\n for op in ops:\n if 'noCollation' in op['q']:\n self.assertNotIn('collation', op)\n else:\n self.assertEqual(self.collation.document,\n op['collation'])\n\n check_ops(delete_cmd['deletes'])\n check_ops(update_cmd['updates'])\n\n @client_context.require_version_max(3, 3, 8)\n def test_mixed_bulk_collation(self):\n bulk = self.db.test.initialize_unordered_bulk_op()\n bulk.find({'foo': 42}).upsert().update_one(\n {'$set': {'bar': 10}})\n bulk.find({'foo': 43}, collation=self.collation).remove_one()\n with self.assertRaises(ConfigurationError):\n bulk.execute()\n self.assertIsNone(self.db.test.find_one({'foo': 42}))\n\n @raisesConfigurationErrorForOldMongoDB\n def test_indexes_same_keys_different_collations(self):\n self.db.test.drop()\n usa_collation = Collation('en_US')\n ja_collation = Collation('ja')\n self.db.test.create_indexes([\n IndexModel('fieldname', collation=usa_collation),\n IndexModel('fieldname', name='japanese_version',\n collation=ja_collation),\n IndexModel('fieldname', name='simple')\n ])\n indexes = self.db.test.index_information()\n self.assertEqual(usa_collation.document['locale'],\n indexes['fieldname_1']['collation']['locale'])\n self.assertEqual(ja_collation.document['locale'],\n indexes['japanese_version']['collation']['locale'])\n self.assertNotIn('collation', indexes['simple'])\n self.db.test.drop_index('fieldname_1')\n indexes = self.db.test.index_information()\n self.assertIn('japanese_version', indexes)\n self.assertIn('simple', indexes)\n self.assertNotIn('fieldname', indexes)\n\n def test_unacknowledged_write(self):\n unacknowledged = WriteConcern(w=0)\n collection = self.db.get_collection(\n 'test', write_concern=unacknowledged)\n with self.assertRaises(ConfigurationError):\n collection.update_one(\n {'hello': 'world'}, {'$set': 
{'hello': 'moon'}},\n                collation=self.collation)\n        bulk = collection.initialize_ordered_bulk_op()\n        bulk.find({'hello': 'world'}, collation=self.collation).update_one(\n            {'$set': {'hello': 'moon'}})\n        with self.assertRaises(ConfigurationError):\n            bulk.execute()\n        update_one = UpdateOne({'hello': 'world'}, {'$set': {'hello': 'moon'}},\n                               collation=self.collation)\n        with self.assertRaises(ConfigurationError):\n            collection.bulk_write([update_one])\n\n    @raisesConfigurationErrorForOldMongoDB\n    def test_cursor_collation(self):\n        self.db.test.insert_one({'hello': 'world'})\n        next(self.db.test.find().collation(self.collation))\n        self.assertCollationInLastCommand()\n","repo_name":"CSUFTitanRover/TitanRover2019","sub_path":"build/resources/mongo-python-driver-master/test/test_collation.py","file_name":"test_collation.py","file_ext":"py","file_size_in_byte":16215,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"36940893919","text":"import vk_api \nfrom vk_api.bot_longpoll import VkBotLongPoll, VkBotEventType\nimport asyncio\nimport time\nfrom .events_handler import EventHandler\nimport logging\nfrom log.loggHandler import ERROR\nfrom logging import Logger\n\nclass Bot(EventHandler):\n\n    logg: Logger = None\n    \n    VK_TOKEN = None\n    GROUP_ID = None\n    GROUP_NAME = None\n\n    isActiveBot = False\n\n    def init(self):\n        \n        self.vk_session = vk_api.VkApi(token=self.VK_TOKEN)\n        self.longpoll = VkBotLongPoll(self.vk_session, self.GROUP_ID)\n        self.vk = self.vk_session.get_api()\n\n        self.logg = logging.getLogger(self.GROUP_NAME)\n        self.logg.addHandler(ERROR(vk=self.vk, group_id=self.GROUP_ID))\n\n    def start_bot(self):\n\n        self.init() \n\n        self.logg.info(f\"SUCCESS CONNECTION BOT {self.GROUP_NAME}\")\n\n        try:\n            \n            for event in self.longpoll.listen():\n                \n                if self.isActiveBot == False:\n                    self.logg.info(f\"STOPPING BOT {self.GROUP_NAME}\")\n                    return\n\n                if event.type == VkBotEventType.MESSAGE_NEW:\n                    asyncio.run(self.message_handler(self.vk, event, self.vk_session))\n\n                if event.type == VkBotEventType.WALL_POST_NEW and event.obj['from_id'] == -int(self.GROUP_ID):\n                    asyncio.run(self.wallpost_handler(self.vk, event))\n        \n        except Exception as e:\n            # In case of an error, print it and continue listening\n            self.logg.info(e)\n            # Pause before trying to reconnect\n            time.sleep(5) \n            self.start_bot()\n\n    ","repo_name":"kukarek/groups","sub_path":"helper_bot/group/bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19285140941","text":"from flask import request\nfrom flask_restplus import Resource\nfrom ..util.dto import SpecieDto\nfrom ..util.decorator import token_required, admin_token_required\nfrom ..services.specie_service import *\n\napi = SpecieDto.api\n_specie = SpecieDto.specie\nparser = SpecieDto.parser\n\n@api.route(\"/\")\nclass NewSpecie(Resource):\n    @admin_token_required\n    @api.response(201, \"Specie added\")\n    @api.doc(\"add a new specie\", parser=parser)\n    def post(self):\n        specie_data = request.json\n\n        return new_specie(data=specie_data)\n\n@api.route(\"/all\")\nclass SpecieList(Resource):\n    @token_required\n    @api.doc(\"show list of all registered species\")\n    @api.marshal_list_with(_specie, envelope=\"data\")\n    def get(self):\n        return get_all_species()\n\n@api.route(\"/<public_id>\")\n@api.param(\"public_id\", \"The Specie identifier\")\n@api.response(404, \"Specie not found.\")\nclass SpecieOperations(Resource):\n    
@admin_token_required\n @api.doc(\"get a specie\")\n @api.marshal_with(_specie)\n def get(self, public_id):\n specie = get_a_specie(public_id)\n\n if not specie:\n api.abort(404)\n\n else:\n return specie\n\n @admin_token_required\n @api.doc(\"delete specie\")\n def delete(self, public_id):\n specie = delete_specie(public_id)\n\n if not specie:\n api.abort(404)\n\n else:\n return specie\n \n @admin_token_required\n @api.doc(\"update specie\")\n def put(self, public_id):\n specie_data = request.json\n\n specie = edit_specie(public_id=public_id, data=specie_data)\n\n if not specie:\n api.abort(404)\n\n else:\n return specie","repo_name":"mariabeaalyssa/Boop_API","sub_path":"app/main/controller/specie_controller.py","file_name":"specie_controller.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12013417115","text":"import json\n\nfrom utils.debug import dbgPrint, dbgMed\nfrom utils.health import printOK\nfrom utils.health import printInfo, printExtraInfo\nfrom utils.health import printError, printExtraError\nfrom utils.redfish import makeRedfishCall, validateField, FIELD, TYPE\n\nchassisURIs = [\n [\"SerialNumber\", str],\n [\"Power\", dict],\n [\"PartNumber\", str],\n [\"Manufacturer\", str],\n [\"Model\", str]\n]\n\ndef checkRedfishChassis(bmcName):\n dbgPrint(dbgMed, \"checkRedfishChassis\")\n badResults = 0\n\n path = \"https://\" + bmcName + \"/redfish/v1/Chassis\"\n dbgPrint(dbgMed, \"checkRedfishChassis checking \" + path)\n payload, label, msg = makeRedfishCall(\"GET\", path)\n\n if not payload:\n printError(\"checkRedfishChassis\")\n printExtraError(label, msg)\n return 1\n\n response = json.loads(payload)\n\n if \"Members\" not in response:\n printError(\"checkRedfishChassis\")\n printExtraError(path + \" .Members\", \"missing\")\n return 1\n\n for member in response[\"Members\"]:\n path = \"https://\" + bmcName + member[\"@odata.id\"]\n dbgPrint(dbgMed, \"checkRedfishChassis checking \" + path)\n payload, label, msg = makeRedfishCall(\"GET\", path)\n\n if not payload:\n printError(\"checkRedfishChassis\")\n printExtraError(label, msg)\n badResults += 1\n continue\n\n mResponse = json.loads(payload)\n\n if (\"ChassisType\" in mResponse and\n (mResponse[\"ChassisType\"] == \"Enclosure\" or\n mResponse[\"ChassisType\"] == \"RackMount\")):\n for check in chassisURIs:\n badResults += validateField(\"checkRedfishChassis\",\n member[\"@odata.id\"], check[FIELD],\n mResponse, check[TYPE])\n else:\n printInfo(\"checkRedfishChassis\")\n printExtraInfo(\"Skipping \"+member[\"@odata.id\"],\n \"URI is for a \" + mResponse[\"ChassisType\"])\n\n if badResults == 0:\n printOK(\"checkRedfishChassis\")\n\n return badResults","repo_name":"Cray-HPE/hms-tools","sub_path":"hwval/validations/redfishmod/chassis.py","file_name":"chassis.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23038949710","text":"import os\n\n# Case100_1.nii.gz Case100_1_0000.nii.gz\nres = ''\ntrain = os.listdir('./Task058_Spine/imagesTr')\nfor i in train:\n tmp = 'mv ' + i + ' ' + i[:-7] + '_0000.nii.gz'\n tmp += '\\n'\n res += tmp\nwith open('./imagesTr.sh', 'w') as f:\n f.write(res)\n\n# Case100_1.nii.gz Case100_1_0000.nii.gz\nres = ''\ntest = os.listdir('./Task058_Spine/imagesTs')\nfor i in test:\n tmp = 'mv ' + i + ' ' + i[:-7] + '_0000.nii.gz'\n tmp += '\\n'\n res += tmp\nwith open('./imagesTs.sh', 'w') as f:\n 
f.write(res)\n\n# mask_case100_1.nii.gz -> Case100_1.nii.gz\nres = ''\nlabel = os.listdir('./Task058_Spine/labelsTr')\nfor i in label:\n    tmp = 'mv ' + i + ' C' + i[6:]\n    tmp += '\\n'\n    res += tmp\nwith open('./labelsTr.sh', 'w') as f:\n    f.write(res)\n","repo_name":"Vizards8/pytorch-spine-segmentation","sub_path":"nnunet/gen_bat.py","file_name":"gen_bat.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"72399580279","text":"from ideaToText import Decision\n\nclass CreateTriangle(Decision):\n\n    def registerChoices(self):\n        self.addChoice('makesTriangle', {\n            'hasTriangle': 40,\n            'noTriangle': 20\n        })\n\n        self.addChoice('triangleUsesForLoop', {\n            'usesLoopForTriangle': 80,\n            'noLoopForTriangle': 1\n        })\n\n        self.addChoice('randomOrOneSide', {\n            'oneSide': 70,\n            'random': 40\n        })\n\n    def updateRubric(self):\n        if self.getChoice('makesTriangle') == 'noTriangle':\n            self.turnOnRubric('triangle-none')\n        elif self.getChoice('triangleUsesForLoop') == 'noLoopForTriangle':\n            self.turnOnRubric('triangle-unrolled')\n\n    def render(self):\n        triangleForLoop = self.getChoice('triangleUsesForLoop')\n        makesTriangle = self.getChoice('makesTriangle')\n        randomOrOneSide = self.getChoice('randomOrOneSide')\n\n        if makesTriangle == 'hasTriangle':\n            if triangleForLoop == 'noLoopForTriangle':\n                return '''\n                {DrawSide}\n                {DrawSide}\n                {DrawSide}\n                '''\n            else:\n                return '''\n                Repeat({NumSides}) {{\n                    {DrawSide}\n                }}\n                '''\n        if randomOrOneSide == 'oneSide':\n            return '{DrawSide}'\n        return ''\n","repo_name":"TylerYep/sage","sub_path":"generate/grammars/p3/createTriangle.py","file_name":"createTriangle.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18791060567","text":"import time\nimport math\nimport pandas as pd\nimport multiprocessing\nimport numpy as np\n\n\ndf = pd.DataFrame(np.random.rand(10, 1))\nresults = []\n\ndef split_dataframe_by_position(df, splits_n):\n    \"\"\"Takes a dataframe and an integer of the number of splits to create.\n    Returns a list of dataframes.\"\"\"\n    dataframes = []\n    index_to_split = math.ceil(len(df)/splits_n)\n    # splits\n    start = 0\n    end = index_to_split\n    for split in range(splits_n):\n        temporary_df = df.iloc[start:end, :]\n        dataframes.append(temporary_df)\n        start += index_to_split\n        end += index_to_split\n    return dataframes\n\ndef processData(df, num):\n    \"\"\"Does some compute intensive operation on the data frame.\n    Returns a list.\"\"\"\n    time.sleep(1)\n    print(\"processData\\n\")\n    df = df + num\n    return df\n\n\ndef collect_results(result):\n    \"\"\"Uses apply_async's callback to set up a separate Queue for each process\"\"\"\n    print(\"subprocess\")\n    results.append(result)\n\n\nif __name__ == \"__main__\":\n    start_time = time.time()\n\n    # Repeats the compute intensive operation on 10 data frames concurrently\n    processes_num = multiprocessing.cpu_count()\n    df_list = split_dataframe_by_position(df, processes_num)\n    print([len(item) for item in df_list])\n\n    pool = multiprocessing.Pool(processes=processes_num)\n    for i in range(processes_num):\n        res = pool.apply_async(processData, args=(df_list[i], 2), callback=collect_results)\n        print(res)\n        print(\"res.get()\", res.get())\n    pool.close()\n    pool.join()\n\n    # Converts list of lists to a data frame\n    print(results)\n    df = pd.concat(results)\n    print(df)\n    print(df.shape)\n    print(\"--- %s seconds ---\" % (time.time() - 
start_time))","repo_name":"rongxiang1986/Python","sub_path":"moduleExamples/multiprocessingExample/example1.py","file_name":"example1.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"7800968579","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rcParams[\"figure.figsize\"] = [7.00, 3.50]\nplt.rcParams[\"figure.autolayout\"] = True\n\nx = np.linspace(-3, 3, 3)\ny = np.linspace(-3, 3, 3)\n\nx, y = np.meshgrid(x, y)\n\nplane_equation = 0.12 * x + 0.01 * y + 1.09\n\nfig = plt.figure()\n\nax = plt.axes(projection='3d')\n\nax.plot_surface(x, y, plane_equation, color='red')\n\nax.set_xlim(-10, 10) \nax.set_ylim(-10, 10) \nax.set_zlim(-10, 10)\n\n\nplt.show()\n","repo_name":"AleksandarLukic96/02507_Project_ImageAnalysis_ComputerGraphics","sub_path":"plane.py","file_name":"plane.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"43280320075","text":"import locale\n\nclass FilingStatus:\n SINGLE=0\n JOINT=1\n SEPARATE=2\n HEAD=3\n WIDOW=4\n\nclass Form(object):\n def __init__(self, inputs):\n self.data = {}\n self.must_file = False\n self.forms = []\n name = self.__class__.__name__\n if name in inputs:\n for k in inputs[name]:\n self[k] = inputs[name][k]\n\n def addForm(self, form):\n if form.mustFile():\n self.forms.append(form)\n\n def get(self, i):\n if i in self.data:\n return self.data[i]\n else:\n return None\n\n def __contains__(self, i):\n return i in self.data\n\n def __setitem__(self, i, val):\n if val is None:\n if i in self.data:\n del self.data[i]\n else:\n self.data[i] = int(round(val))\n\n def __getitem__(self, i):\n x = self.data.get(i)\n if x is None:\n return 0\n else:\n return x\n\n def mustFile(self):\n return self.must_file\n\n def rowsum(self, rows):\n val = 0\n isNone = True\n for r in rows:\n if r in self:\n isNone = False\n val += self[r]\n return None if isNone else val\n\n def spouseSum(self, inputs, field):\n if field not in inputs:\n return None\n if inputs['status'] == FilingStatus.JOINT:\n return inputs[field][0] + inputs[field][1]\n else:\n return inputs[field]\n\n def printForm(self):\n def keynormalize(a):\n s = ''\n numstr = ''\n out = []\n for c in a:\n if c.isdigit():\n if s:\n out.append(s)\n s = ''\n numstr += c\n else:\n if numstr:\n out.append(int(numstr))\n numstr = ''\n s += c\n if s:\n out.append(s)\n if numstr:\n out.append(int(numstr))\n return out\n\n locale.setlocale(locale.LC_ALL, '')\n print('%s:' % self.title())\n keys = self.data.keys()\n keys.sort(key=keynormalize)\n for k in keys:\n print(' %4s %11s' % (k, locale.format('%d', self[k], 1)))\n\n def printAllForms(self):\n for f in self.forms:\n f.printForm()\n","repo_name":"davidcmoore/python-taxes","sub_path":"2012/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"40"} +{"seq_id":"73458848121","text":"import os\nfrom dnslib import DNSRecord\nimport socket\nimport tldextract\n\nR = '\\033[31m' # red\nG = '\\033[32m' # green\nC = '\\033[36m' # cyan\nW = '\\033[0m' # white\nY = '\\033[33m' # yellow\n\n\ndef write_dns_to_file(final_dns, final_dmarc, output_file):\n with open(output_file, 'w+') as f:\n for data in final_dns:\n print(data, file=f)\n\n for data in final_dmarc:\n print(data, file=f)\n print(R + '\\t[->]' + Y + 'Result have been saved in the file 
{}'.format(output_file))\n\n\ndef dns_enum(target, output):\n final_dns=[]\n final_result=[]\n url_parts=tldextract.extract(target)\n target=\".\".join(url_parts[-2:])\n\n print(G+'\\n[+]'+R+' Starting DNS Enumeration\\n')\n types=['A','AAAA', 'CNAME', 'MX', 'NS', 'TXT']\n forward_addr=('8.8.8.8', 53)\n client=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n for type in types:\n q = DNSRecord.question(target, type)\n client.sendto(bytes(q.pack()), forward_addr)\n data, _ = client.recvfrom(1024)\n d = DNSRecord.parse(data)\n d=str(d).split('\\n')\n final_result.extend(d)\n\n final_result=set(final_result)\n\n for each_dns in final_result:\n if each_dns.startswith(';') == False:\n final_dns.append(each_dns)\n\n if len(final_dns) != 0:\n for entry in final_dns:\n print(G+'[+] '+C+f'{entry}')\n else:\n print(R+'[!] DNS Record Not found..!!!')\n\n dmarc_target='_dmarc.'+target\n q = DNSRecord.question(dmarc_target, 'TXT')\n packet = q.send('8.8.8.8', 53, tcp='UDP')\n dmarc_answer = DNSRecord.parse(packet)\n dmarc_answer = str(dmarc_answer).split('\\n')\n final_dmarc=[]\n\n for each_dmarc in dmarc_answer:\n if each_dmarc.startswith('_dmarc') == True:\n final_dmarc.append(each_dmarc)\n\n if len(final_dmarc) != 0:\n for entry in final_dmarc:\n print(G+'[+] '+C+f'{entry}')\n print(R+'-'*20)\n\n else:\n print(Y+'[!]'+R+'DMARC Record Not found..!!!')\n\n write_dns_to_file(final_dns, final_dmarc, output)\n","repo_name":"Dhanush-T/website-cracker","sub_path":"web-recon/modules/dns.py","file_name":"dns.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72222479481","text":"import torch\nimport numpy as np\nimport tensorflow as tf\n\n\nclass ScoreEstimator(object):\n def __init__(self):\n pass\n\n def rbf_kernel(self, x1, x2, kernel_width):\n return torch.exp(-torch.sum((x1 - x2) ** 2, dim=-1) /\n (2 * kernel_width ** 2))\n\n def gram(self, x1, x2, kernel_width):\n x_row = torch.unsqueeze(x1, dim=-2)\n x_col = torch.unsqueeze(x2, dim=-3)\n kernel_width = kernel_width[..., None, None]\n return self.rbf_kernel(x_row, x_col, kernel_width)\n\n def grad_gram(self, x1, x2, kernel_width):\n x_row = torch.unsqueeze(x1, dim=-2)\n x_col = torch.unsqueeze(x2, dim=-3)\n kernel_width = kernel_width[..., None, None]\n G = self.rbf_kernel(x_row, x_col, kernel_width)\n diff = (x_row - x_col) / (kernel_width[..., None] ** 2)\n G_expand = torch.unsqueeze(G, dim=-1)\n grad_x2 = G_expand * diff\n grad_x1 = G_expand * (-diff)\n return G, grad_x1, grad_x2\n\n def heuristic_kernel_width(self, x_samples, x_basis):\n n_samples = x_samples.shape[-2]\n n_basis = x_basis.shape[-2]\n x_samples_expand = torch.unsqueeze(x_samples, dim=-2)\n x_basis_expand = torch.unsqueeze(x_basis, dim=-3)\n pairwise_dist = torch.sqrt(\n torch.sum((x_samples_expand - x_basis_expand) ** 2,\n dim=-1))\n k = n_samples * n_basis // 2\n top_k_values = torch.topk(\n pairwise_dist.view(-1, n_samples * n_basis),\n k=k, dim=-1)[0]\n kernel_width = top_k_values[:, -1].view(*(x_samples.shape[:-2]))\n return kernel_width.detach()\n\n def compute_gradients(self, samples, x=None):\n raise NotImplementedError()\n\n\nclass SpectralScoreEstimator(ScoreEstimator):\n def __init__(self, n_eigen=None, eta=None, n_eigen_threshold=0.99):\n super().__init__()\n self._n_eigen = n_eigen\n self._eta = eta\n self._n_eigen_threshold = n_eigen_threshold\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n tf.enable_eager_execution(config=config)\n\n def 
nystrom_ext(self, samples, x, eigen_vectors, eigen_values, kernel_width):\n # samples: [..., M, x_dim]\n # x: [..., N, x_dim]\n # eigen_vectors: [..., M, n_eigen]\n # eigen_values: [..., n_eigen]\n # return: [..., N, n_eigen], by default n_eigen=M.\n M = samples.shape[-2]\n # Kxq: [..., N, M]\n # grad_Kx: [..., N, M, x_dim]\n # grad_Kq: [..., N, M, x_dim]\n Kxq = self.gram(x, samples, kernel_width)\n # Kxq = tf.Print(Kxq, [tf.shape(Kxq)], message=\"Kxq:\")\n # ret: [..., N, n_eigen]\n ret = np.sqrt(M) * torch.matmul(Kxq, eigen_vectors)\n ret *= 1. / torch.unsqueeze(eigen_values, dim=-2)\n return ret\n\n def compute_gradients(self, samples, x=None):\n # samples: [..., M, x_dim]\n # x: [..., N, x_dim]\n if x is None:\n kernel_width = self.heuristic_kernel_width(samples, samples)\n x = samples\n else:\n # _samples: [..., N + M, x_dim]\n _samples = torch.cat([samples, x], dim=-2)\n kernel_width = self.heuristic_kernel_width(_samples, _samples)\n\n M = samples.shape[-2]\n # Kq: [..., M, M]\n # grad_K1: [..., M, M, x_dim]\n # grad_K2: [..., M, M, x_dim]\n Kq, grad_K1, grad_K2 = self.grad_gram(samples, samples, kernel_width)\n if self._eta is not None:\n Kq += self._eta * torch.eye(M, device=samples.device)\n\n with tf.device(\"/cpu:0\"):\n eigen_values, eigen_vectors = tf.self_adjoint_eig(Kq.cpu().numpy())\n\n eigen_values = torch.tensor(eigen_values.numpy(), device=samples.device)\n eigen_vectors = torch.tensor(eigen_vectors.numpy(), device=samples.device)\n\n # eigen_values = []\n # eigen_vectors = []\n # for Mat in Kq:\n # e, v = torch.symeig(Mat, eigenvectors=True, upper=False)\n # eigen_values.append(e)\n # eigen_vectors.append(v)\n #\n # eigen_values = torch.stack(eigen_values, dim=0)\n # eigen_vectors = torch.stack(eigen_vectors, dim=0)\n\n if (self._n_eigen is None) and (self._n_eigen_threshold is not None):\n eigen_arr = torch.mean(\n eigen_values.view(-1, M), dim=0)\n # eigen_arr = eigen_arr[..., ::-1]\n eigen_arr = eigen_arr.flip([-1])\n eigen_arr /= torch.sum(eigen_arr)\n eigen_cum = torch.cumsum(eigen_arr, dim=-1)\n self._n_eigen = torch.sum(\n (eigen_cum < self._n_eigen_threshold).int())\n if self._n_eigen is not None:\n # eigen_values: [..., n_eigen]\n # eigen_vectors: [..., M, n_eigen]\n eigen_values = eigen_values[..., -self._n_eigen:]\n eigen_vectors = eigen_vectors[..., -self._n_eigen:]\n # eigen_ext: [..., N, n_eigen]\n eigen_ext = self.nystrom_ext(\n samples, x, eigen_vectors, eigen_values, kernel_width)\n # grad_K1_avg = [..., M, x_dim]\n grad_K1_avg = torch.mean(grad_K1, dim=-3)\n # beta: [..., n_eigen, x_dim]\n beta = -np.sqrt(M) * torch.matmul(\n eigen_vectors.transpose(-1, -2), grad_K1_avg) / torch.unsqueeze(\n eigen_values, dim=-1)\n # grads: [..., N, x_dim]\n grads = torch.matmul(eigen_ext, beta)\n return grads\n\n\nclass SteinScoreEstimator(ScoreEstimator):\n def __init__(self, eta=0.001):\n super().__init__()\n self._eta = eta\n\n def compute_gradients(self, samples, x=None):\n # samples: [..., M, x_dim]\n # x: [..., 1, x_dim]\n M = samples.shape[-2]\n # kernel_width: [...]\n kernel_width = self.heuristic_kernel_width(samples, samples)\n # K: [..., M, M]\n # grad_K1: [..., M, M, x_dim]\n # grad_K2: [..., M, M, x_dim]\n K, grad_K1, grad_K2 = self.grad_gram(samples, samples,\n kernel_width)\n # K_inv: [..., M, M]\n Kinv = torch.inverse(K + self._eta * torch.eye(M, device=samples.device))\n # H_dh: [..., M, x_dim]\n H_dh = torch.sum(grad_K2, dim=-2)\n # grads: [..., M, x_dim]\n grads = - torch.matmul(Kinv, H_dh)\n if x is None:\n return grads\n else:\n assert 
x.shape[-2] == 1, \"Only support single-particle out-of-sample extension.\"\n Kxx = self.gram(x, x, kernel_width)\n # Kxq: [..., 1, M]\n Kxq = self.gram(x, samples, kernel_width)\n # Kxq @ K_inv: [..., 1, M]\n KxqKinv = torch.matmul(Kxq, Kinv)\n # term1: [..., 1, 1]\n term1 = -1. / (Kxx + self._eta -\n torch.matmul(KxqKinv, Kxq.transpose(-1, -2)))\n # grad_Kqx2: [..., M, 1, x_dim]\n Kqx, grad_Kqx1, grad_Kqx2 = self.grad_gram(samples, x, kernel_width)\n # term2: [..., 1, x_dim]\n term2 = torch.matmul(Kxq, grads) - torch.matmul(KxqKinv + 1.,\n torch.squeeze(grad_Kqx2, -2))\n # ret: [..., 1, x_dim]\n return torch.matmul(term1, term2)\n","repo_name":"ermongroup/sliced_score_matching","sub_path":"models/kernel_score_estimators.py","file_name":"kernel_score_estimators.py","file_ext":"py","file_size_in_byte":7216,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"40"} +{"seq_id":"19222692503","text":"\"\"\"election URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\n\nfrom election import views\n\n\nurlpatterns = [\n url(r'^$', views.browse, name='browse'),\n url(r'^admin/', admin.site.urls),\n url(r'^api/endorsements.json', views.get_endorsements,\n name='get-endorsements'),\n url(r'^api/tags.json', views.get_tags, name='get_tags'),\n url(r'^api/search.json', views.search_endorsers,\n name='search_endorsers'),\n url(r'^endorser/$', views.add_endorser,\n name='add-endorser'),\n url(r'^endorser/(?P\\d+)/$', views.view_endorser,\n name='view-endorser'),\n url(r'^endorser/(?P\\d+)/add-account$',\n views.add_account, name='add-account'),\n url(r'^endorser/(?P\\d+)/add-endorsement$',\n views.add_endorsement, name='add-endorsement'),\n url(r'^endorsers/random$',\n views.random_endorser, name='random-endorser'),\n url(r'^progress/wikipedia$',\n views.progress_wikipedia, name='progress-wikipedia'),\n url(r'^progress/wikipedia/(?P[^/]+)/missing$',\n views.progress_wikipedia_missing, name='progress-wikipedia-missing'),\n url(r'^progress/wikipedia/(?P[^/]+)/(?P\\w+)$',\n views.progress_wikipedia_list, name='progress-wikipedia-list'),\n url(r'^progress/tagging$', views.progress_tagging,\n name='progress-tagging'),\n url(r'^progress/twitter$', views.progress_twitter,\n name='progress-twitter'),\n url(r'^confirm/endorsements$',\n views.confirm_endorsements, name='confirm-endorsements'),\n url(r'^confirm/endorsements/(?P\\d+)', views.confirm_endorsement,\n name='confirm-endorsement'),\n url(r'^confirm/newspapers$', views.confirm_newspapers,\n name='confirm-newspapers'),\n url(r'^confirm/newspapers/(?P\\d+)', views.confirm_newspaper,\n name='confirm-newspaper'),\n url(r'^stats/states$', views.stats_states, name='stats-states'),\n url(r'^stats/predictions$', views.stats_predictions,\n name='stats-predictions'),\n url(r'^stats/tags$', views.stats_tags, name='stats-tags'),\n url(r'^stats/charts$', 
views.charts, name='charts'),\n]\n\n\nif settings.DEBUG:\n import debug_toolbar\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n ]\n","repo_name":"endorsementdb/endorsementdb.com","sub_path":"election/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"22774011672","text":"import os\n\nimport gym\nimport panda_gym\n\n\ndef test():\n \"\"\"test panda-gym environment\"\"\"\n env = gym.make(\"PandaStack-v1\", render=True)\n\n p = env.sim.physics_client\n path = os.path.abspath(os.path.join(__file__, \"../..\"))\n p.loadURDF(\n f\"{path}/ropiens/ropiens.urdf\",\n [-0.2, 0.9, 0.3],\n [0.5, 0.5, 0.5, 0.5],\n globalScaling=3,\n useFixedBase=1,\n )\n\n obs = env.reset()\n while True:\n env.render()\n action = env.action_space.sample()\n env.step(action)\n\n env.close()\n\n\nif __name__ == \"__main__\":\n test()\n","repo_name":"ropiens/project-sandwich-man","sub_path":"test/test_env.py","file_name":"test_env.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"15575224658","text":"class Solution:\n def checkInclusion(self, s1: str, s2: str) -> bool:\n\n # create frequency map in pattern\n char_frequency = {}\n for char in s1:\n if char not in char_frequency:\n char_frequency[char] = 0\n char_frequency[char] += 1\n\n window_start = 0\n matched = 0\n for window_end in range(len(s2)):\n # add char to the window\n current_char = s2[window_end]\n\n # if current character in the frequency map\n if current_char in char_frequency:\n # decrement the frequency\n char_frequency[current_char] -= 1\n # if frequency is 0,we have a complete match of one character within the window\n # increased the matched distinct character count\n if char_frequency[current_char] == 0:\n matched += 1\n # if any point,matched distinct character count equal to length of freq map\n # we found a permutation,return true\n if matched == len(char_frequency):\n return True\n\n # if window end is greater than the length of the pattern\n # remove characters from beginning of the window\n if window_end >= len(s1) - 1:\n remove_char = s2[window_start]\n # if the character to be remove was in part of the freq map\n if remove_char in char_frequency:\n # and the character freq was 0,since we are removing character from window matched count\n # decrement by one\n if char_frequency[remove_char] == 0:\n matched -= 1\n # since we are removing from the window,frequency count should increment\n char_frequency[remove_char] += 1\n window_start += 1\n return False\n","repo_name":"rajithst/leetcode-challenges","sub_path":"strings/medium/permutations_in_string.py","file_name":"permutations_in_string.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"18748020568","text":"import types\nfrom typing import Any\n\nimport lightly\nimport torch\nfrom dagster import In, Out, op\nfrom omegaconf import DictConfig\n\nfrom src.train_models.models import SimCLR\n\n\n@op(ins={\"params\": In(DictConfig)})\ndef get_criterion(params) -> torch.nn.Module:\n \"\"\"Method to get the required criterion for training SimCLR\"\"\"\n return getattr(\n lightly.loss, params.model.criterion\n )()\n\n\n@op(ins={\"params\": In(DictConfig)})\ndef build_model(\n params: DictConfig,\n) -> torch.nn.Module:\n \"\"\"Method to build 
SimCLR model\"\"\"\n return SimCLR(params.model.backbone)\n\n\n@op(\n ins={\n \"params\": In(DictConfig),\n \"model_params\": In(types.GeneratorType),\n }\n)\ndef build_optimizer(\n params: DictConfig,\n model_params: types.GeneratorType,\n) -> torch.optim.Optimizer:\n \"\"\"Method to build optimizer\"\"\"\n lr = (\n params.optimizer.start_lr\n * (params.training.batch_size)\n / 256\n )\n return getattr(\n torch.optim,\n params.optimizer.optimizer_name,\n )(\n params=model_params,\n lr=lr,\n momentum=params.optimizer.momentum,\n weight_decay=params.optimizer.weight_decay,\n )\n\n\n@op(\n ins={\n \"params\": In(DictConfig),\n \"optimizer\": In(torch.optim.Optimizer),\n }\n)\ndef build_scheduler(\n optimizer: torch.optim.Optimizer,\n params: DictConfig,\n):\n \"\"\"Method to build cosine annealing scheduler\"\"\"\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n optimizer, params.epoch\n )\n return scheduler\n\n\n@op(\n ins={\"params\": In(DictConfig)},\n out={\n \"model\": Out(torch.nn.Module),\n \"optimizer\": Out(torch.optim.Optimizer),\n \"scheduler\": Out(Any),\n },\n)\ndef compile_model(params):\n \"\"\"Method to compile model, scheduler and optimizer\"\"\"\n model = build_model(params)\n optimizer = build_optimizer(\n params=params,\n model_params=model.parameters(),\n )\n scheduler = build_scheduler(optimizer, params)\n return model, optimizer, scheduler\n\n\n# __all__ = [\"compile_model\"]\n","repo_name":"Atharva-Phatak/shopme","sub_path":"src/train_models/model_ops.py","file_name":"model_ops.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"40"} +{"seq_id":"8591024419","text":"import logging\nimport re\n\n# ------------------------------------------------------------------------------- #\n\n\ndef template(body, domain):\n BUG_REPORT = 'Cloudflare may have changed their technique, or there may be a bug in the script.'\n\n try:\n js = re.search(\n r'setTimeout\\(function\\(\\){\\s+(.*?a\\.value\\s*=\\s*\\S+toFixed\\(10\\);)',\n body,\n re.M | re.S\n ).group(1)\n except Exception:\n raise ValueError('Unable to identify Cloudflare IUAM Javascript on website. {}'.format(BUG_REPORT))\n\n jsEnv = '''String.prototype.italics=function(str) {{return \"\" + this + \"\";}};\n var subVars= {{{subVars}}};\n var document = {{\n createElement: function () {{\n return {{ firstChild: {{ href: \"https://{domain}/\" }} }}\n }},\n getElementById: function (str) {{\n return {{\"innerHTML\": subVars[str]}};\n }}\n }};\n '''\n\n try:\n js = js.replace(\n r\"(setInterval(function(){}, 100),t.match(/https?:\\/\\//)[0]);\",\n r\"t.match(/https?:\\/\\//)[0];\"\n )\n\n k = re.search(r\" k\\s*=\\s*'(?P\\S+)';\", body).group('k')\n r = re.compile(r'
\s*(?P">
<div id=\"{}(?P<id>\d+)\">\s*(?P<jsfuck>[^<>]*)</div>
'.format(k))\n\n subVars = ''\n for m in r.finditer(body):\n subVars = '{}\\n\\t\\t{}{}: {},\\n'.format(subVars, k, m.group('id'), m.group('jsfuck'))\n subVars = subVars[:-2]\n\n except: # noqa\n logging.error('Error extracting Cloudflare IUAM Javascript. {}'.format(BUG_REPORT))\n raise\n\n return '{}{}'.format(\n re.sub(\n r'\\s{2,}',\n ' ',\n jsEnv.format(\n domain=domain,\n subVars=subVars\n ),\n re.MULTILINE | re.DOTALL\n ),\n js\n )\n\n# ------------------------------------------------------------------------------- #\n","repo_name":"VeNoMouS/cloudscraper","sub_path":"cloudscraper/interpreters/encapsulated.py","file_name":"encapsulated.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":3589,"dataset":"github-code","pt":"40"} +{"seq_id":"74217178359","text":"#/usr/bin/python3\n\"\"\"\nAuthor:fingerkc\ndate:20180729\nfunc:refresh all files to a contents on html then push to git \n\"\"\"\n\nimport os\nfrom os.path import getsize,join\nDIR_MAX_SIZE=1024*1024*10 ###10 M\nREMOVE_LIST=[\".git\"] ##移除的文件夹\nDONAME=\"\" #https://coding.fyping.cn\nhrefs=[]\nnames=[]\nfor root , dirs , files in os.walk(\"./\"):\n dir_files_size=0\n #print([str(getsize(join(root,name)))+\"bytes\" for name in files],[name for name in files])\n for i in [getsize(join(root,name)) for name in files]:\n dir_files_size+=i\n if(dir_files_size>DIR_MAX_SIZE):\n print(\"-------Warning: %s is over %f M\"%(root,dir_files_size))\n #print([\"root is %s\"%(\"/\".join(str(join(root,name)).split(\"\\\\\") ) )[1:]for name in files])\n hrefs = hrefs+[(\"/\".join(str(join(root,name)).split(\"\\\\\") ) )[1:]for name in files]\n names=names+files\n for i in REMOVE_LIST:\n if i in dirs:\n dirs.remove(i)\nprint(len(names),len(hrefs),names,hrefs)\nhead=[\"\",\"\",\"\",\"目录\",\"\",\"\"]\nbottom=[\"\"]\nwith open(\"./index.html\",\"w\",encoding=\"utf-8\") as f:\n for i in head:\n f.write(i)\n f.write(\"\")\n for i in bottom:\n f.write(i)\nargs=[\"git pull\",\"git add .\",\"git commit -m'200'\",\"git push -f\"]\nfor i in args:\n os.system(i)\n ","repo_name":"fingerecho/fingerecho.github.io","sub_path":"subcontent/backup/refresh.py","file_name":"refresh.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"19433494357","text":"from typing import List\nfrom collections import Counter\n\n\nclass Solution:\n def findContentChildren(self, g: List[int], s: List[int]) -> int:\n g.sort(reverse=True)\n s.sort(reverse=True)\n\n sum = i = j = 0\n while i < len(g) and j < len(s):\n if g[i] <= s[j]:\n i, j, sum = i + 1, j + 1, sum + 1\n else:\n i += 1\n return sum\n","repo_name":"plocinskipiotr/my_leetcode","sub_path":"problems/easy/assign_cookies/assign_cookies.py","file_name":"assign_cookies.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38069784762","text":"from projetoextensao.settings.base import *\nimport dj_database_url\n\nDEBUG = False\nALLOWED_HOSTS = ['projetoextensao.herokuapp.com']\nSECRET_KEY = os.environ['DJANGO_KEY']\nDATABASES['default'] = dj_database_url.config()\n\n# Base url for static files\nSTATIC_FILES_URL = 
os.environ['STATIC_FILES_URL']\n","repo_name":"dmodena/projetoextensao","sub_path":"projetoextensao/settings/prod.py","file_name":"prod.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"19956866129","text":"from turtle import Turtlee\nfrom gui import Gui\nfrom help_functions import *\nimport math\nfrom Exceptions_ch import *\nimport pytest\n\n\ndef test_turtle(distance3=-100, distance1=100, distance2=500000, ang1=730):\n\n turtle2 = Turtlee(x0=300, y0=300, angle=30)\n gui2 = Gui()\n turtle2.forward(distance1, gui2)\n assert turtle2.position_x == round(300 + math.sqrt(3) * 50)\n assert turtle2.position_y == 250\n\n turtle2.position_x = 0\n turtle2.position_y = 0\n turtle2.ang_position = 0\n turtle2.forward(distance2, gui2)\n assert turtle2.position_x == turtle2.max_x\n assert turtle2.position_y == 0\n\n turtle2.position_x = 300\n turtle2.position_y = 0\n turtle2.ang_position = 0\n turtle2.forward(distance3, gui2)\n assert turtle2.position_x == 200\n assert turtle2.position_y == 0\n\n turtle2.rotate(ang1, gui2)\n assert turtle2.ang_position == 10\n\n\ndef test_formaterror():\n with pytest.raises(UnknownFormatError):\n turtle = Turtlee()\n format_checker(\"das dasdsa \", turtle)\n\n\ndef test_commanderror():\n with pytest.raises(UnknownCommandError):\n turtle = Turtlee()\n command_checker('gora', 30, turtle)\n\n\ndef test_argumenterror():\n with pytest.raises(InvalidArgumentError):\n turtle = Turtlee()\n argument_checker('naprzod', 'haha', turtle)\n\n\ndef test_turtleimage():\n with pytest.raises(FileDoesNotExistError):\n turtle = Turtlee(path_to_turtle_false='nic')\n\n\ndef test_format():\n turtle = Turtlee()\n komenda1, argument1 = format_checker('naprzod 50', turtle)\n komenda2, argument2 = format_checker('podnies', turtle)\n assert komenda1 == 'naprzod'\n assert argument1 == '50'\n assert komenda2 == 'podnies'\n assert argument2 == ''\n\n\ndef test_argument():\n turtle = Turtlee()\n x = argument_checker('naprzod', '50', turtle)\n assert x is True\n\n\ndef test_command():\n turtle = Turtlee()\n x = command_checker('naprzod', '50', turtle)\n assert x is True\n","repo_name":"WojciechGierulski/turtle","sub_path":"test_logo.py","file_name":"test_logo.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35912924022","text":"# -*- coding: utf-8 -*-\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport base64\nimport re\nfrom datetime import datetime as dt\n\n# BEGIN IMPORTS AND DEFS FROM Main_Indicators.py\n# Import libraries\nimport pandas as pd\npd.set_option('display.max_rows', 100)\npd.set_option('display.max_columns', 100)\npd.set_option('display.width', 400)\nimport numpy as np\nimport os # misc operating system interfaces\nfrom glob import glob # unix pathname functions\n\n\n#def loadTickers(start_date,record_date,todays_date,tickers,path=None): # not sure what to use path for\ndef loadTickers(start_date,record_date,todays_date,tickers):\n #print(\" in loadTickers\")\n from distutils.version import LooseVersion, StrictVersion\n from pandas_datareader import data\n if StrictVersion(pd.__version__) < StrictVersion(u'0.23.4'):\n print (\"Error: Old version of pandas used in loadTickers gives columns of objects, not floats\")\n print (\"Pandas \",pd.__version__ , \" Numpy \", np.__version__)\n if 
not os.path.exists('Figures'):\n os.makedirs('Figures')\n if not os.path.exists('Spreadsheets'):\n os.makedirs('Spreadsheets')\n\n return data.DataReader(tickers, \n start=start_date, \n end=record_date, \n data_source='yahoo')['Adj Close']\n\n\ndef allIndicators(moduleList, date):\n import datetime\n #print(\" in allIndicators \")\n # create a date range\n #currentDT = datetime.datetime.now()\n #currentDT = datetime.date(2019, 2, 1)\n currentDT = dt.strptime(date, \"%Y-%m-%d\")\n #if currentDT.month < 10:\n # todays_date = str(currentDT.year) + \"-0\" + str(currentDT.month) + \"-\" + str(currentDT.day)\n #else:\n # todays_date = str(currentDT.year) + \"-\" + str(currentDT.month) + \"-\" + str(currentDT.day)\n todays_date = date\n #todays_date = str(currentDT.year) + \"-\" + str(currentDT.month) + \"-\"+ str(currentDT.day)\n # NOTE: can make the record_date a date in the past to generate previous Technical and Economic indicators\n record_date = todays_date\n print(\"########## date=\"+date)\n print(\"########## record_date=\"+record_date)\n #oneYearAgo = (currentDT - datetime.timedelta(366))\n # two years of data is needed to calculate 12M Gain and Fund X Score indicators\n twoYearsAgo = (currentDT - datetime.timedelta(731))\n start_date = str(twoYearsAgo.year) + \"-\" + str(twoYearsAgo.month) + \"-\"+ str(twoYearsAgo.day)\n \n # Load the tickers for date range\n data = loadTickers(start_date,record_date,todays_date,[\"SPY\",\"BIL\"])\n #print(\" exited loadTickers\")\n # Determine last market day of last month\n # Get the last day of last month by taking the first day of this month\n # and subtracting 1 day.\n lastDay = datetime.date(currentDT.year, currentDT.month, 1) - datetime.timedelta(1)\n # Set the day to 1 gives us the start of the month\n firstDay = lastDay.replace(day=1)\n last_EOM_date = data[\"SPY\"].loc[firstDay:lastDay].last('1D').index # get the last market day last month\n \n strLast_EOM_date = last_EOM_date.strftime(\"%Y-%m-%d\") # get the last market day last month\n print(\"Last Market Day last Month\", str(strLast_EOM_date[0]))\n # print(\"type(strLast_EOM_date) = \", type(str(strLast_EOM_date[0])))\n # last_EOM_date = str(strLast_EOM_date[0])\n # Determine last market day of the month before last\n # Get the last day of last month by taking the first day of last month\n # and subtracting 1 day.\n # Set the day to 1 gives us the start of the month\n \n if (currentDT.month > 1):\n lastDay = datetime.date(currentDT.year, currentDT.month-1, 1) - datetime.timedelta(1)\n else:\n lastDay = datetime.date(currentDT.year-1, currentDT.month+11, 1) - datetime.timedelta(1)\n firstDay = lastDay.replace(day=1)\n previous_EOM_date = data[\"SPY\"].loc[firstDay:lastDay].last('1D').index\n\n # get the last market day last month\n\n strPrevious_EOM_date = previous_EOM_date.strftime('%Y-%m-%d') # and month before last\n print(\"Last Market Day Month before last\", str(strPrevious_EOM_date[0]))\n \n # the following line is equivolent to :\n # spreadsheetData = []\n #for for module in moduleList: spreadsheetData.append(__import__(module).Indicator(data))\n \n spreadsheetList = [__import__(module).Indicator(data, record_date, last_EOM_date, previous_EOM_date) for module in moduleList]\n \n # print(\" done with streadsheetList \")\n # Turn aggegated results into spreadsheet.\n # print(\"type(spreadsheetData) = \", type(spreadsheetData))\n # print (spreadsheetData)\n print(\"\\n\")\n spreadsheet = pd.DataFrame(spreadsheetList, columns = ['Technical Indicator','Frequency', 'MonthBeforeLast', 
'LastMonth','Comment'])\n print(spreadsheet)\n \n writer = pd.ExcelWriter(str('Spreadsheets/'+record_date +'_indicator_sheet.xlsx'))\n spreadsheet.to_excel(writer,'Indicators', index=False)\n writer.save()\n\n writer2 = pd.ExcelWriter(str('Spreadsheets/indicator_sheet.xlsx'))\n #writer2 = pd.ExcelWriter(str('Figures/'+record_date +'_indicator_sheet.xlsx')) #repeats over and over\n spreadsheet.to_excel(writer2,'Indicators', index=False)\n writer2.save()\n# END IMPORTS AND DEFS FROM Main_Indicators.py\n\n#import datetime\n#currentDT = datetime.datetime.now()\n#todays_date = str(currentDT.year) + \"-\" + str(currentDT.month) + \"-\"+ str(currentDT.day)\n## NOTE: can make the record_date a date in the past to generate previous Technical and Economic indicators\n#record_date = todays_date\n#spreadsheet = pd.read_excel(open('Spreadsheets/2019-7-19_indicator_sheet.xlsx','rb'), sheetname='Indicators')\n#spreadsheet = pd.read_excel(open('Spreadsheets/2019-7-19_indicator_sheet.xlsx','rb'))\nspreadsheet = pd.read_excel(open('Spreadsheets/indicator_sheet.xlsx','rb'))\n#spreadsheet = pd.read_excel(open(str('Spreadsheets/'+record_date +'_indicator_sheet.xlsx'),'rb'))\n\ndef generate_table(dataframe, max_rows=10):\n return html.Table(\n # Header\n [html.Tr([html.Th(col) for col in dataframe.columns])] +\n\n # Body\n [html.Tr([\n html.Td(dataframe.iloc[i][col]) for col in dataframe.columns\n ]) for i in range(min(len(dataframe), max_rows))]\n )\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\nserver = app.server\n\nall_options = {\n 'America': ['New York City', 'San Francisco', 'Cincinnati'],\n 'Canada': [u'Montréal', 'Toronto', 'Ottawa']\n}\nglobal_indicator = \"\"\n\nmodules =[name[:-3] for name in glob(\"i*.py\")] #i.e.i01_10MSMASPY.py\nmodules.sort()\n\napp.layout = html.Div([\n dcc.DatePickerSingle(\n id='current-date',\n date=dt.now()\n ),\n html.Div(id='display-date', style={'display': 'none'}),\n #html.Div(id='display-date'),\n \n dcc.Tabs(id=\"tabs\", children=[\n dcc.Tab(label=\"Image\", children=[\n dcc.RadioItems(\n #id='countries-dropdown',\n #options=[{'label': k, 'value': k} for k in all_options.keys()],\n #value='America'\n id='indicators-dropdown',\n options=[{'label': k, 'value': k} for k in modules],\n value=modules[0]\n ),\n html.Div(id='display-indicator', style={'display': 'none'}),\n html.Div(html.Img(id='display-image')),\n ]),\n dcc.Tab(label=\"Dataframe\", children=[\n #html.H4(children='Technical Indicators'),\n html.H4(id='title-pretable'),\n html.Pre(id='display-pretable')\n ]),\n dcc.Tab(label=\"Debug\", children=[\n html.Button(id='list-button', n_clicks=0, children='List Files'),\n html.Button(id='erase-button', n_clicks=0, children='Erase Files'),\n html.Div(id='display-files'),\n html.Div(id='erase-files', style={'display': 'none'}),\n html.H4(children='Technical Indicators'),\n generate_table(spreadsheet)\n ])\n ])\n])\n\n\n@app.callback(\n Output('display-files', 'children'),\n [Input('list-button', 'n_clicks')])\ndef display_files(nClicks):\n listOfFiles = os.listdir('Figures/')\n for i in listOfFiles:\n file = \"Figures/\"+i\n if os.path.exists(file):\n #os.remove(file)\n print(\"LIST \"+file)\n return \"|\\n\".join(listOfFiles)\n\n@app.callback(\n Output('erase-files', 'children'),\n [Input('erase-button', 'n_clicks')])\ndef erase_files(nClicks):\n listOfFiles = os.listdir('Figures/')\n for i in listOfFiles:\n file = \"Figures/\"+i\n if os.path.exists(file):\n 
##os.remove(file)\n print(\"REMOVE1 \"+file)\n return \"|\\n\".join(listOfFiles)\n\n@app.callback(\n Output('display-date', 'children'),\n [Input('current-date', 'date')],\n [State('indicators-dropdown', 'value')])\ndef set_record_date(date, indicator):\n if date is not None:\n #date = re.sub(\"T*\",\"\", date)\n date = date[:10]\n print(\"##### current_date=\"+date)\n print(\"##### indicator =\"+indicator)\n #if os.path.exists(\"Figures/i02_Mini-Dipper SPY.png\"):\n # os.remove(\"Figures/i02_Mini-Dipper SPY.png\")\n #module = [indicator]\n #oneIndicator(module, date)\n listOfFiles = os.listdir('Figures/')\n for i in listOfFiles:\n file = \"Figures/\"+i\n if os.path.exists(file):\n #os.remove(file)\n print(\"REMOVE \"+file)\n modules =[name[:-3] for name in glob(\"i*.py\")] #i.e.i01_10MSMASPY.py\n modules.sort()\n allIndicators(modules, date)\n return date\n\n@app.callback(\n Output('display-pretable', 'children'),\n [Input('current-date', 'date')])\ndef set_record_date(date):\n if date is not None:\n #date = re.sub(\"T*\",\"\", date)\n date = date[:10]\n print(\"##### current_date=\"+date)\n df = pd.read_excel(open(\"Spreadsheets/\"+date+\"_indicator_sheet.xlsx\",\"rb\"))\n #df.rename({\"Technical Indicator\" : date+\" Technical Indicator\"}, axis=1)\n #pretable = \"Technical Indicators, \"+str(date)+\"\\n\\n\"+str(df)\n pretable = str(df)\n return pretable\n\n@app.callback(\n Output('title-pretable', 'children'),\n [Input('current-date', 'date')])\ndef set_title_pretable(date):\n if date is not None:\n date = date[:10]\n title = \"Technical Indicators, \"+date\n return title\n\n# @app.callback(\n # Output('title-table', 'children'),\n # [Input('current-date', 'date')])\n# def set_title_table(date):\n # if date is not None:\n # date = date[:10]\n # title = \"Technical Indicators, \"+date\n # return title\n\n#@app.callback(\n# Output('display-indicator', 'children'),\n# [Input('current-date', 'date'),\n# Input('indicators-dropdown', 'value')])\n#def set_cities_options(date, indicator):\n# #module = [name for name in glob(selected_indicator+\".py\")]\n# if date is not None:\n# date = date[:10]\n# print(\"##### 2current_date=\"+date)\n# print(\"##### 2indicator =\"+indicator)\n# module = [indicator]\n# oneIndicator(module, date)\n# return indicator\n\n@app.callback(\n Output('display-image', 'src'),\n [Input('current-date', 'date'),\n Input('indicators-dropdown', 'value')])\ndef set_image_options(date, selected_indicator):\n if date is not None:\n date = date[:10]\n #module = [name for name in glob(selected_indicator+\".py\")]\n print(\"date=\"+str(date)) #DEBUG\n print(\"selected_indicator=\"+str(selected_indicator)) #DEBUG\n return str(\"Figures/\"+date+\"_\"+selected_indicator + \".png\")\n\n# @app.callback(\n# Output('cities-dropdown', 'options'),\n# [Input('countries-dropdown', 'value')])\n# def set_cities_options(selected_country):\n# #modules =[name[:-3] for name in glob(\"i*.py\")] #i.e.i01_10MSMASPY.py\n# #modules.sort()\n# #return modules\n# return [{'label': i, 'value': i} for i in glob(\"i*.py\")]\n\n\n# @app.callback(\n# Output('cities-dropdown', 'value'),\n# [Input('cities-dropdown', 'options')])\n# def set_cities_value(available_options):\n# return available_options[0]['value']\n\n\n# @app.callback(\n# Output('display-selected-values', 'children'),\n# [Input('countries-dropdown', 'value'),\n# Input('cities-dropdown', 'value')])\n# def set_display_children(selected_country, selected_city):\n# return u'{} is a city in {}'.format(\n# selected_city, selected_country,\n# )\n\n\nif 
__name__ == '__main__':\n # BEGIN MAIN CODE FROM Main_Indicators.py\n #modules =[name[:-3] for name in glob(\"i*.py\")] #i.e.i01_10MSMASPY.py\n #modules.sort()\n #allIndicators(modules, date)\n # END MAIN CODE FROM Main_Indicators.py\n print(\"##### START app.run_server #####\")\n app.run_server(debug=True)\n","repo_name":"rdavis27/AAII-CIMI-Indicators-Dash","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27759915842","text":"import math\n\n\ndef gcd(a, b):\n \"\"\"Return greatest common divisor of a and b\"\"\"\n if a == 0 or b == 0:\n return a + b\n # recursive will reach maximum recursion depth\n while a != b:\n a, b = (a - b, b) if a > b else (a, b - a)\n return a\n\n\ndef lcm(a, b):\n \"\"\"Return least common multiple\"\"\"\n if not isinstance(a, int):\n a = int(a)\n if not isinstance(b, int):\n b = int(b)\n return abs(a*b) / gcd(a, b)\n\n\ndef lcm_series(series):\n assert len(series) > 0, \"series cannot be empty\"\n ret = series[0]\n for i in series[1:]:\n ret = lcm(ret, i)\n return ret\n\n\nif __name__ == \"__main__\":\n assert int(lcm(20, 30)) == 60\n assert int(lcm(1, 1)) == 1\n assert int(lcm(3, 0)) == 0\n print('Pass test')\n print(int(lcm_series(range(1, 20))))\n","repo_name":"quoctin/project_euler","sub_path":"src/smallest_multiple.py","file_name":"smallest_multiple.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"72290603961","text":"'''\nThis file is part of python-libdeje.\n\npython-libdeje is free software: you can redistribute it and/or modify\nit under the terms of the GNU Lesser General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\npython-libdeje is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Lesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public License\nalong with python-libdeje. 
If not, see .\n'''\n\nfrom __future__ import absolute_import\n\nfrom deje.tests.dexter_commands import DexterCommandTester\n\nclass TestDexterBasicGroup(DexterCommandTester):\n\n def test_help(self):\n with self.io:\n self.interface.do_command('help')\n self.assertEqual(self.interface.view.contents, [\n 'msglog> help',\n 'Dexter is a low-level DEJE client.',\n 'It\\'s perfect for low-level management of documents.',\n 'Type \"commands\" to see the list of available commands.',\n 'Type \"help somecommand\" to see more about a command.',\n ])\n\n def test_help_with_args(self):\n with self.io:\n self.interface.do_command('help help commands blooby')\n self.assertEqual(self.interface.view.contents, [\n 'msglog> help help commands blooby',\n 'help :: A simple little help message.',\n '',\n 'You can also view full descriptions with \"help commandname\".',\n 'commands :: List all available commands.',\n 'blooby :: No such command.',\n ])\n\n def test_commands(self):\n with self.io:\n self.interface.do_command('commands')\n self.assertEqual(self.interface.view.contents, [\n 'msglog> commands',\n 'commands :: List all available commands.',\n 'demo :: No description available.',\n 'devent :: Propose a change to the document.',\n 'dexport :: Serialize the current document to disk.',\n 'dget_latest :: Get the latest version number of the doc.',\n 'dinit :: Initialize DEJE interactivity.',\n 'dvexport :: Serialize the current document to variable storage.',\n 'fread :: Read contents of a file as a series of commands.',\n 'fwrite :: Write contents of a view to a file.',\n 'help :: A simple little help message.',\n 'quit :: Exit the program.',\n 'vclone :: Copy variable data from one location to another.',\n 'vdel :: Delete a value from variable storage.',\n 'vget :: Print a value in variable storage.',\n 'view :: List views, or select one.',\n 'vload :: Load a variable value from disk.',\n 'vsave :: Save a variable value to disk.',\n 'vset :: Set a value in variable storage.',\n ])\n","repo_name":"campadrenalin/python-libdeje","sub_path":"deje/tests/test_dexter_commands_basic.py","file_name":"test_dexter_commands_basic.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"35817361612","text":"MONGODB = {\n \"uri\": \"mongodb://localhost:27017/\",\n \"db\": \"search_word\",\n \"collection\": \"words\"\n}\n\nREDIRECT_DATA = {\n \"Referer\": \"\", # 需要在check_redirect里面设置\n \"User-Agent\": \"\", # 需要在check_redirect里面设置\n \"Content-Type\": \"text/html; charset=utf-8\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\"\n}\n\n# 知网PC请求数据\nCNKI_URL = {\n \"base_url\": \"http://kns.cnki.net\",\n \"home_url\": \"http://kns.cnki.net/kns/request/SearchHandler.ashx?action=&NaviCode=*&\",\n \"list_url\": \"http://kns.cnki.net/kns/brief/brief.aspx?\",\n \"current_referer\": \"http://kns.cnki.net/kns/brief/default_result.aspx\"\n}\n\nCNKI_START = {\n \"txt_1_sel\": \"SU$%=|\",\n \"txt_1_value1\": \"\", # 需要在start_requests里面设置\n \"txt_1_special1\": \"%\",\n \"PageName\": \"ASP.brief_default_result_aspx\",\n \"ConfigFile\": \"SCDBINDEX.xml\",\n \"DbPrefix\": \"SCDB\",\n \"db_opt\": \"CJFQ,CDFD,CMFD,CPFD,IPFD,CCND,CCJD\",\n \"his\": 0,\n \"ua\": \"1.11\",\n \"isinEn\": \"1\",\n \"parentdb\": \"SCDB\",\n \"__\": \"\" # 需要在start_requests里面设置\n}\n\nCNKI_PARSE = {\n \"pagename\": \"ASP.brief_default_result_aspx\",\n \"isinEn\": \"1\",\n \"dbPrefix\": \"SCDB\",\n 
\"dbCatalog\": \"中国学术文献网络出版总库\",\n \"ConfigFile\": \"SCDBINDEX.xml\",\n \"research\": \"off\",\n \"t\": \"\", # 需要在parse里面设置\n \"keyValue\": \"\", # 需要在parse里面设置\n \"S\": \"1\",\n \"sorttype\": \"\",\n}\n\nCNKI_PARSE_LIST = {\n \"curpage\": \"\", # 需要在parse_list_first里面设置\n \"RecordsPerPage\": 20,\n \"QueryID\": 2,\n \"ID\": \"\",\n \"turnpage\": 1,\n \"tpagemode\": \"L\",\n \"dbPrefix\": \"SCDB\",\n \"Fields\": \"\",\n \"DisplayMode\": \"listmode\",\n \"PageName\": \"ASP.brief_default_result_aspx\",\n \"isinEn\": 1\n}\n\n# 万方的请求数据\nWANFANG = {\n \"searchType\": \"all\",\n \"showType\": \"detail\",\n \"pageSize\": 20,\n \"searchWord\": \"\", # 需要在start_requests里面设置\n \"isTriggerTag\": \"\"\n}\n\nWANFANG_NEXT = {\n \"beetlansyId\": \"aysnsearch\",\n \"searchType\": \"all\",\n \"pageSize\": 20,\n \"page\": \"\", # 需要在parse_link_list设置\n \"searchWord\": \"\", # 需要在parse_link_list设置\n \"order\": \"correlation\",\n \"showType\": \"detail\",\n \"isCheck\": \"check\",\n \"isHit\": \"\",\n \"isHitUnit\": \"\",\n \"firstAuthor\": \"false\",\n \"rangeParame\": \"\",\n \"navSearchType\": \"all\"\n}\n\nWANFANG_ITEM = {\n \"_type\": \"\",\n \"id\": \"\"\n}\n\n# 知网手机端请求数据\nCNKI_WAP_START = {\n \"kw\": \"\",\n \"field\": 5\n}\n\nCNKI_WAP_PARSE = {\n \"searchtype\": \"0\",\n \"dbtype\": \"\",\n \"pageindex\": \"1\",\n \"pagesize\": \"10\",\n \"theme_kw\": \"\",\n \"title_kw\": \"\",\n \"full_kw\": \"\",\n \"author_kw\": \"\",\n \"depart_kw\": \"\",\n \"key_kw\": \"\",\n \"abstract_kw\": \"\",\n \"source_kw\": \"\",\n \"teacher_md\": \"\",\n \"catalog_md\": \"\",\n \"depart_md\": \"\",\n \"refer_md\": \"\",\n \"name_meet\": \"\",\n \"collect_meet\": \"\",\n \"keyword\": \"\",\n \"remark\": \"\",\n \"fieldtype\": \"101\",\n \"sorttype\": \"0\",\n \"articletype\": \"-1\",\n \"screentype\": \"0\",\n \"isscreen\": \"\",\n \"subject_sc\": \"\",\n \"research_sc\": \"\",\n \"depart_sc\": \"\",\n \"sponsor_sc\": \"\",\n \"author_sc\": \"\",\n \"teacher_sc\": \"\",\n \"subjectcode_sc\": \"\",\n \"researchcode_sc\": \"\",\n \"departcode_sc\": \"\",\n \"sponsorcode_sc\": \"\",\n \"authorcode_sc\": \"\",\n \"teachercode_sc\": \"\",\n \"starttime_sc\": \"\",\n \"endtime_sc\": \"\",\n \"timestate_sc\": \"1\"\n}\n\n\n# 爱学术\nIXUESHU_START = {\n \"q\": \"\",\n \"sort\": \"year desc\"\n}\n\nIXUESHU_LINK = {\n \"q\": \"\",\n \"sort\": \"year desc\",\n \"page\": \"\"\n}\n","repo_name":"paulRoux/Paper","sub_path":"spider/spider/configs/base_setting.py","file_name":"base_setting.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"8281698547","text":"import datetime\nfrom app.api_v2.model.threat import ThreatValue\nfrom ..utils import check_org, default_org, token_required, user_has, ip_approved, page_results\nfrom flask_restx import Resource, Namespace, fields, inputs as xinputs\nfrom ..model import ThreatList, DataType\nfrom .shared import ISO8601, mod_pagination, ValueCount\nfrom ... 
import ep\n\n\nclass ThreatValueList(fields.Raw):\n ''' Returns values in the correct format'''\n\n def format(self, value):\n return '\\n'.join([v['value'] for v in value])\n\napi = Namespace('Lists', description=\"Intel List operations\", path=\"/list\")\n\nmod_data_type_list = api.model('DataTypeList', {\n 'uuid': fields.String,\n 'organization': fields.String,\n 'name': fields.String,\n 'description': fields.String,\n 'regex': fields.String,\n 'created_at': ISO8601(attribute='created_at'),\n 'updated_at': ISO8601(attribute='updated_at')\n})\n\nmod_threat_value = api.model('ThreatValue', {\n 'value': fields.String,\n 'record_id': fields.String,\n 'organization': fields.String,\n 'from_poll': fields.Boolean,\n 'poll_interval': fields.Integer,\n 'key_field': fields.String,\n 'data_type': fields.String,\n 'list_uuid': fields.String,\n 'list_name': fields.String,\n 'created_at': ISO8601\n})\n\nmod_list_list = api.model('ListView', {\n 'uuid': fields.String,\n 'organization': fields.String,\n 'name': fields.String,\n 'list_type': fields.String,\n 'tag_on_match': fields.Boolean,\n 'data_type': fields.Nested(mod_data_type_list),\n 'url': fields.String,\n 'poll_interval': fields.Integer,\n 'last_polled': ISO8601(attribute='last_polled'),\n 'values': ThreatValueList(),\n #'values_list': fields.List(fields.String, attribute='values'),\n 'to_memcached': fields.Boolean,\n 'active': fields.Boolean,\n 'value_count': fields.Integer,\n 'created_at': ISO8601(attribute='created_at'),\n 'updated_at': ISO8601(attribute='updated_at'),\n 'csv_headers': fields.String,\n 'csv_headers_data_types': fields.String,\n 'case_sensitive': fields.Boolean\n})\n\nmod_list_list_paged = api.model('ListViewPaged', {\n 'lists': fields.Nested(mod_list_list),\n 'pagination': fields.Nested(mod_pagination)\n})\n\nmod_list_create = api.model('ListCreate', {\n 'name': fields.String(required=True, example='SpamHaus eDROP'),\n 'organization': fields.String,\n 'list_type': fields.String(required=True, example='values'),\n 'tag_on_match': fields.Boolean(example=False),\n 'data_type_uuid': fields.String(required=True),\n 'values': fields.String(example='127.0.0.1\\n4.4.4.4\\n1.1.1.1'),\n 'polling_interval': fields.Integer(example=3600),\n 'url': fields.Url(description='A URL to pull threat data from', example='https://www.spamhaus.org/drop/edrop.txt'),\n 'to_memcached': fields.Boolean,\n 'active': fields.Boolean(example=True),\n 'csv_headers': fields.String,\n 'csv_headers_data_types': fields.String,\n 'case_sensitive': fields.Boolean\n})\n\nmod_list_values = api.model('ListValues', {\n 'values': fields.List(fields.String)\n})\n\nmod_list_match = api.model('ListMatch', {\n 'name': fields.String,\n 'value': fields.String,\n 'matched': fields.Boolean\n})\n\nmod_list_values_paged = api.model('ListValuesPaged', {\n 'values': fields.Nested(mod_threat_value),\n 'pagination': fields.Nested(mod_pagination)\n})\n\nlist_parser = api.parser()\nlist_parser.add_argument(\n 'data_type', location='args', required=False)\nlist_parser.add_argument(\n 'organization', location='args', required=False\n)\nlist_parser.add_argument(\n 'page', type=int, location='args', default=1, required=False)\nlist_parser.add_argument(\n 'page_size', type=int, location='args', default=10, required=False)\n\n@api.route(\"\")\nclass ThreatListList(Resource):\n\n @api.doc(security=\"Bearer\")\n @api.marshal_with(mod_list_list_paged, as_list=True)\n @api.expect(list_parser)\n @token_required\n @default_org\n @user_has('view_lists')\n def get(self, user_in_default_org, 
current_user):\n ''' Returns a list of ThreatLists '''\n\n args = list_parser.parse_args()\n\n lists = ThreatList.search()\n\n if args.data_type:\n if user_in_default_org and args.organization:\n data_type = DataType.get_by_name(name=args.data_type, organization=args.organization)\n else:\n data_type = DataType.get_by_name(name=args.data_type)\n if data_type:\n lists = lists.filter('term', data_type_uuid=data_type.uuid)\n\n if user_in_default_org and args.organization:\n lists = lists.filter('term', organization=args.organization)\n\n lists, total_results, pages = page_results(lists, args.page, args.page_size)\n\n lists = lists.execute()\n\n response = {\n 'lists': list(lists),\n 'pagination': {\n 'total_results': total_results,\n 'pages': pages,\n 'page': args['page'],\n 'page_size': args['page_size']\n }\n }\n \n return response\n\n @api.doc(security=\"Bearer\")\n @api.expect(mod_list_create, validate=True)\n @api.marshal_with(mod_list_list)\n @api.response('409', 'ThreatList already exists.')\n @api.response('200', \"Successfully created the list.\")\n @token_required\n @user_has('add_list')\n def post(self, current_user):\n '''Creates a new ThreatList\n \n A threat list is what the system uses to determine if an observable\n is malicious or suspicious in nature. ThreatLists can be consumed\n via target URLs or manually entered in to the system, or added to\n via the API. \n\n Supported list types: `values|pattern|csv`\n\n When `url` is populated the `values` field will be ignored.\n\n '''\n\n value_list = ThreatList.get_by_name(name=api.payload['name'])\n\n if value_list:\n api.abort(409, \"ThreatList already exists.\")\n\n if api.payload['list_type'] not in ['values', 'patterns', 'csv']:\n api.abort(400, \"Invalid list type.\")\n\n # Remove any values entered by the user as they also want to pull\n # from a URL and the URL will overwrite their additions\n if 'url' in api.payload:\n del api.payload['values']\n\n # The polling interval must exist in the URL field exists\n if 'poll_interval' not in api.payload or api.payload['poll_interval'] is None:\n api.abort(400, 'Missing poll_interval')\n\n # Don't let the user define an insanely fast polling interval\n if int(api.payload['poll_interval']) < 60:\n api.abort(400, 'Invalid polling interval, must be greater than or equal to 60')\n \n if api.payload['list_type'] == 'csv':\n \n if not 'csv_headers' in api.payload:\n api.abort(400, 'CSV headers are required')\n\n if not 'csv_headers_data_types' in api.payload:\n api.abort(400, 'CSV header to data type mapping is required')\n\n mapping = {}\n headers = api.payload['csv_headers'].split(',')\n data_types = api.payload['csv_headers_data_types'].split(',')\n for i in range(0, len(headers)-1):\n if data_types[i] not in (\"none\",\"null\",\"\"):\n if headers[i] not in mapping:\n mapping[data_types[i]] = [headers[i]]\n else:\n mapping[data_types[i]].append(headers[i])\n\n # Store the data_type to field mapping\n api.payload['csv_header_map'] = mapping\n\n if 'values' in api.payload:\n _values = api.payload.pop('values')\n if not isinstance(_values, list):\n _values = _values.split('\\n')\n values = []\n for value in _values:\n if value == '':\n continue\n values.append(value)\n\n if 'data_type_uuid' in api.payload and DataType.get_by_uuid(api.payload['data_type_uuid']) is None:\n api.abort(400, \"Invalid data type\")\n\n value_list = ThreatList(**api.payload)\n value_list.save()\n \n\n if not 'url' in api.payload:\n value_list.set_values(values)\n\n ep.restart_workers()\n\n return value_list 
\n\n\n@api.route(\"/<uuid>\")\nclass ThreatListDetails(Resource):\n\n    @api.doc(security=\"Bearer\")\n    @api.expect(mod_list_create)\n    @api.marshal_with(mod_list_list)\n    @token_required\n    @user_has('update_list')\n    @check_org\n    def put(self, uuid, current_user):\n        ''' Updates a ThreatList '''\n\n        if 'url' in api.payload and api.payload['url']:\n            del api.payload['values']\n\n        # The polling interval must exist in the URL field exists\n        if 'poll_interval' not in api.payload or api.payload['poll_interval'] is None:\n            api.abort(400, 'Missing poll_interval')\n\n        # Don't let the user define an insanely fast polling interval\n        if int(api.payload['poll_interval']) < 60:\n            api.abort(400, 'Invalid polling interval, must be greater than or equal to 60')\n\n        value_list = ThreatList.get_by_uuid(uuid=uuid)\n\n        if value_list:\n\n            if 'name' in api.payload:\n                l = ThreatList.get_by_name(name=api.payload['name'])\n                if l and l.uuid != uuid:\n                    api.abort(\n                        409, 'ThreatList with that name already exists.')\n\n            if api.payload['list_type'] == 'csv':\n                \n                if not 'csv_headers' in api.payload:\n                    api.abort(400, 'CSV headers are required')\n\n                if not 'csv_headers_data_types' in api.payload:\n                    api.abort(400, 'CSV header to data type mapping is required')\n\n                mapping = {}\n                headers = api.payload['csv_headers'].split(',')\n                data_types = api.payload['csv_headers_data_types'].split(',')\n                for i in range(0, len(headers)-1):\n                    if data_types[i] not in (\"none\",\"null\",\"\"):\n                        if headers[i] not in mapping:\n                            mapping[data_types[i]] = [headers[i]]\n                        else:\n                            mapping[data_types[i]].append(headers[i])\n\n                # Store the data_type to field mapping\n                api.payload['csv_header_map'] = mapping\n\n                # CSV lists contain multiple values so we don't set a base data_type\n                #api.payload['data_type_uuid'] = 'multiple'\n\n            if 'values' in api.payload:\n\n                values = api.payload.pop('values').split('\\n')\n                if len(values) > 0:\n                    value_list.set_values(values)\n                    value_list.remove_values(values)\n\n            # Update the list with all other fields\n            if len(api.payload) > 0:\n                value_list.update(**api.payload)\n                ep.restart_workers()\n\n            return value_list\n        else:\n            api.abort(404, 'ThreatList not found.')\n\n    @api.doc(security=\"Bearer\")\n    @token_required\n    @user_has('delete_list')\n    def delete(self, uuid, current_user):\n        ''' Removes a ThreatList '''\n        value_list = ThreatList.get_by_uuid(uuid=uuid)\n        if value_list:\n            values = ThreatValue.search()\n            values = values.filter('term', list_uuid=value_list.uuid)\n            value_list.delete()\n            values.delete()\n            \n            return {'message': 'ThreatList successfully delete.'}\n        else:\n            api.abort(404, 'ThreatList not found.')\n\n    @api.doc(security=\"Bearer\")\n    @api.marshal_with(mod_list_list)\n    @token_required\n    @user_has('view_lists')\n    def get(self, uuid, current_user):\n        ''' Gets the details of a ThreatList '''\n\n        value_list = ThreatList.get_by_uuid(uuid=uuid)\n        if value_list:\n            return value_list\n        else:\n            api.abort(404, 'ThreatList not found.')\n\n\n@api.route('/test/<uuid>/<value>')\nclass ThreatListTest(Resource):\n\n    @api.doc(security=\"Bearer\")\n    @api.marshal_with(mod_list_match)\n    @token_required\n    @user_has('view_lists')\n    def get(self,current_user, uuid,value):\n\n        intel_list = ThreatList.get_by_uuid(uuid)\n        if intel_list:\n            if intel_list.check_value(value):\n                return {\n                    'name': intel_list.name,\n                    'value': value,\n                    'matched': True\n                }\n            else:\n                return {\n                    'name': intel_list.name,\n                    'value': value,\n                    'matched': False\n                }\n        else:\n            api.abort(404, {'message': 'Intel List not found'})\n\nlist_stats_parser = 
api.parser()\nlist_stats_parser.add_argument('list', location='args', type=str, action='split', required=False)\nlist_stats_parser.add_argument('value', location='args', type=str, action='split', required=False)\nlist_stats_parser.add_argument('value__like', location='args', type=str, required=False)\nlist_stats_parser.add_argument('list_name__like', location='args', type=str, required=False)\nlist_stats_parser.add_argument('record_id', location='args', type=str, required=False)\nlist_stats_parser.add_argument('data_type', location='args', type=str, action='split', required=False)\nlist_stats_parser.add_argument('from_poll', location='args', type=xinputs.boolean, required=False)\nlist_stats_parser.add_argument('top', location='args', default=10, type=int, required=False)\nlist_stats_parser.add_argument('start', location='args', default=(datetime.datetime.utcnow()-datetime.timedelta(days=7)).strftime('%Y-%m-%dT%H:%M:%S'), type=str, required=False)\nlist_stats_parser.add_argument('end', location='args', default=(datetime.datetime.utcnow()+datetime.timedelta(days=1)).strftime('%Y-%m-%dT%H:%M:%S'), type=str, required=False)\nlist_stats_parser.add_argument('interval', location='args', default='day', required=False, type=str)\nlist_stats_parser.add_argument('metrics', location='args', action='split', default=['list','value','data_type','from_poll'])\nlist_stats_parser.add_argument('organization', location='args', action='split', required=False)\n@api.route('/stats')\nclass IntelListStats(Resource):\n\n @api.doc(security=\"Bearer\")\n @api.expect(list_stats_parser)\n @token_required\n @user_has('view_lists')\n def get(self, current_user):\n \n args = list_stats_parser.parse_args()\n \n search_filters = []\n\n if args.value__like and args.value__like != '':\n search_filters.append({\n 'type': 'wildcard',\n 'field': 'value',\n 'value': \"*\"+args.value__like+\"*\"\n })\n\n if args.list_name__like and args.list_name__like != '':\n search_filters.append({\n 'type': 'wildcard',\n 'field': 'list_name',\n 'value': \"*\"+args.list_name__like.lower().replace(' ','_')+\"*\"\n })\n\n if args.list and args.list != '':\n search_filters.append({\n 'type': 'terms',\n 'field': 'list_uuid',\n 'value': args.list\n })\n\n if args.value and args.value != '':\n search_filters.append({\n 'type': 'terms',\n 'field': 'value',\n 'value': args.value\n })\n\n if args.data_type and args.data_type != '':\n search_filters.append({\n 'type': 'terms',\n 'field': 'data_type',\n 'value': args.data_type\n })\n\n if args.from_poll and args.from_poll != '':\n search_filters.append({\n 'type': 'term',\n 'field': 'from_poll',\n 'value': args.from_poll\n })\n\n if args.record_id and args.record_id != '':\n search_filters.append({\n 'type': 'term',\n 'field': 'record_id',\n 'value': args.record_id\n })\n\n search = ThreatValue.search() \n\n # Apply all filters\n for _filter in search_filters:\n search = search.filter(_filter['type'], **{_filter['field']: _filter['value']})\n\n search.aggs.bucket('range', 'filter', range={'created_at': {\n 'gte': args.start,\n 'lte': args.end\n }})\n\n if 'list' in args.metrics:\n search.aggs['range'].bucket('lists', 'terms', field='list_uuid', size=args.top)\n\n if 'value' in args.metrics:\n search.aggs['range'].bucket('values', 'terms', field='value', size=args.top)\n\n if 'data_type' in args.metrics:\n search.aggs['range'].bucket('data_types', 'terms', field='data_type', size=args.top)\n\n if 'from_poll' in args.metrics:\n search.aggs['range'].bucket('from_poll', 'terms', field='from_poll', 
size=args.top)\n\n        search = search[0:0]\n\n        values = search.execute()\n\n        search = ThreatList.search()\n\n        # The 'lists' aggregation only exists when the 'list' metric was requested\n        list_buckets = values.aggs.range.lists.buckets if 'list' in args.metrics else []\n        search = search.filter('terms', uuid=[v['key'] for v in list_buckets])\n\n        search = search[0:args.top]\n        lists = list(search.scan())\n\n        data = {}\n\n        if 'list' in args.metrics:\n            data['list'] = {v['key']: v['doc_count'] for v in values.aggs.range.lists.buckets}\n\n        if 'value' in args.metrics:\n            data['value'] = {v['key']: v['doc_count'] for v in values.aggs.range.values.buckets}\n\n        if 'data_type' in args.metrics:\n            data['data_type'] = {v['key']: v['doc_count'] for v in values.aggs.range.data_types.buckets}\n\n        if 'from_poll' in args.metrics:\n            data['from_poll'] = {v['key']: v['doc_count'] for v in values.aggs.range.from_poll.buckets}\n\n        data['lists'] = {l.uuid: l.name for l in lists}\n\n        return data\n\nlist_value_parser = api.parser()\nlist_value_parser.add_argument('list', location='args', action='split', required=False)\nlist_value_parser.add_argument('value', location='args', action='split', required=False)\nlist_value_parser.add_argument('data_type', location='args', action='split', required=False)\nlist_value_parser.add_argument('from_poll', location='args', type=xinputs.boolean, required=False)\nlist_value_parser.add_argument('value__like', location='args', required=False)\nlist_value_parser.add_argument('list_name__like', location='args', required=False)\nlist_value_parser.add_argument('record_id', location='args', type=str, required=False)\nlist_value_parser.add_argument('organization', location='args', required=False)\nlist_value_parser.add_argument('page', type=int, location='args', default=1, required=False)\nlist_value_parser.add_argument('page_size', type=int, location='args', default=10, required=False)\n@api.route('/values')\nclass IntelListValues(Resource):\n\n    @api.doc(security=\"Bearer\")\n    @api.expect(list_value_parser)\n    @api.marshal_with(mod_list_values_paged)\n    @token_required\n    @default_org\n    @user_has('view_lists')\n    def get(self, user_in_default_org, current_user):\n\n        args = list_value_parser.parse_args()\n\n        lists = None\n\n        if args.list_name__like:\n            intel_list = ThreatList.search()\n\n            if user_in_default_org and args.organization:\n                intel_list = intel_list.filter('term', organization=args.organization)\n\n            intel_list = intel_list.filter('wildcard', name=args.list_name__like+\"*\")\n            lists = list(intel_list.scan())\n\n        values = ThreatValue.search()\n\n        if args.list:\n            if lists:\n                args.list += [l.uuid for l in lists]\n            values = values.filter('terms', list_uuid=args.list)\n        elif lists:\n            values = values.filter('terms', list_uuid=[l.uuid for l in lists])\n\n        if args.value:\n            values = values.filter('terms', value=args.value)\n\n        if args.data_type:\n            values = values.filter('terms', data_type=args.data_type)\n\n        if args.from_poll:\n            values = values.filter('term', from_poll=args.from_poll)\n\n        if args.record_id:\n            values = values.filter('term', record_id=args.record_id)\n\n        values, total_results, pages = page_results(values, args.page, args.page_size)\n\n        values = values.execute()\n\n        response = {\n            'values': list(values),\n            'pagination': {\n                'total_results': total_results,\n                'pages': pages,\n                'page': args['page'],\n                'page_size': args['page_size']\n            }\n        }\n\n        return response\n\n\n@api.route(\"/<uuid>/add_value\")\nclass AddValueToThreatList(Resource):\n\n    @api.doc(security=\"Bearer\")\n    @api.expect(mod_list_values)\n    @token_required\n    @user_has('update_list')\n    def put(self, uuid, current_user):\n        ''' Adds values to a ThreatList '''\n        value_list 
= ThreatList.get_by_uuid(uuid=uuid)\n        if value_list:\n\n            if 'values' in api.payload and api.payload['values'] not in [None, '']:\n                value_list.set_values(api.payload['values'])\n                return {'message': 'Successfully added values to list.'}\n            else:\n                api.abort(400, {'message': 'Values are required.'})\n        else:\n            api.abort(404, 'ThreatList not found.')\n\n\n@api.route('/<uuid>/remove_value')\nclass RemoveValueFromThreatList(Resource):\n\n    @api.doc(security=\"Bearer\")\n    @api.expect(mod_list_values)\n    @token_required\n    @user_has('update_list')\n    def delete(self, uuid, current_user):\n        ''' Deletes values from a ThreatList '''\n\n        if 'values' in api.payload:\n            values = ThreatValue.search()\n            values = values.filter('term', list_uuid=uuid)\n            values = values.filter('terms', value=api.payload['values'])\n            values.delete()\n\n            return {'message': 'Successfully removed values from list.'}\n        api.abort(400, {'message': 'Values are required.'})","repo_name":"reflexsoar/reflex-api","sub_path":"app/api_v2/resource/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":22110,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"20045192373","text":"def integers(items):\n    result = []\n    for item in items:\n        if isinstance(item, int) and item >= 0:\n            result.append(item)\n    return result\n\n\nmixed_list = [10, \"Hello\", 25, \"world\", 0, \"42\", 7]\ninteger_list = integers(mixed_list)\nprint(integer_list)","repo_name":"jbullzy/pythonpractice","sub_path":"project12.py","file_name":"project12.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"12231812836","text":"numbers = {0: 'zero', 1: 'one', 2: 'two', 3: 'three', 4: 'four',\n           5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}\n\n\ndef lookup(n):\n    \"\"\"\n    function to look up a number in a dictionary and return the value\n    :param n: integer\n    :return: value from the dictionary\n    \"\"\"\n    if n in numbers:\n        return numbers[n]\n    else:\n        return \"Number not found\"\n\n\nprint(lookup(2))\nprint(lookup(20))\n","repo_name":"doraithodla/py101","sub_path":"learnpy3/lookup.py","file_name":"lookup.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73412155639","text":"import collections\nfrom typing import List\n\n\nclass Solution:\n    def restoreArray(self, adjacentPairs: List[List[int]]) -> List[int]:\n        graph = collections.defaultdict(list)\n        for a, b in adjacentPairs:\n            graph[a].append(b)\n            graph[b].append(a)\n\n        # an endpoint of the reconstructed array has exactly one neighbour\n        start = adjacentPairs[0][0]\n        for idx, val in graph.items():\n            if len(val) == 1:\n                start = idx\n                break\n\n        ans = []\n        visited = set()\n        def dfs(num):\n            visited.add(num)\n            for ver in graph[num]:\n                if ver not in visited:\n                    dfs(ver)\n            ans.append(num)\n        dfs(start)\n        return ans","repo_name":"aymeneliyas/competitive_programming","sub_path":"restore-the-array-from-adjacent-pairs.py","file_name":"restore-the-array-from-adjacent-pairs.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38130847766","text":"from sys import exit\n\nimport pygame\nfrom pygame.locals import *\n\nimport numpy as np\n\nfrom level import Level\nfrom object import Object, ObjectKind, HitboxKind\nfrom viewport import Viewport\nfrom rect import Rect\n\nfrom constants import *\n\npygame.init()\n\nscreen = pygame.display.set_mode(RESOLUTION, pygame.RESIZABLE)\n\nfrom obj_kinds 
import obj_kinds\nfrom level_parser import parse_level\n\nclock = pygame.time.Clock()\nviewport = Viewport(screen, zoom=9 / 4, position=np.array([200.0, GROUND_HEIGHT - 30 * 15]))\n\nlevel = Level(viewport, parse_level('assets/levels/stereomadness.lvl'))\npygame.mixer.music.load('assets/songs/StereoMadness.mp3')\npygame.mixer.music.play()\npygame.mixer.music.pause()\n\npaused = False\nt = 0\ndone = False\n\nrunning = True\nwhile running:\n dt = clock.tick(MAX_FPS) / 1000\n t += dt\n\n for ev in pygame.event.get():\n if ev.type == QUIT:\n pygame.quit()\n exit(0)\n\n if ev.type == KEYUP:\n if ev.key == K_ESCAPE:\n paused = not paused\n\n if ev.key == K_RIGHT:\n level.tick(dt / PHYSICS_SUBTICKS)\n\n if ev.type == MOUSEBUTTONDOWN:\n # 1 = left click; 2 = middle click; 3 = right click; 4 = scroll up; 5 = scroll down\n if ev.button == 1:\n level.input_activated = True\n\n if ev.type == MOUSEBUTTONUP:\n # 1 = left click; 2 = middle click; 3 = right click; 4 = scroll up; 5 = scroll down\n if ev.button == 1:\n level.input_activated = False\n\n screen.fill(BACKGROUND_COLOR)\n\n if not paused:\n if (not level.stopped) and (not pygame.mixer.music.get_busy()):\n pygame.mixer.music.unpause()\n\n for _ in range(PHYSICS_SUBTICKS):\n level.tick(dt / PHYSICS_SUBTICKS)\n else:\n if pygame.mixer.music.get_busy():\n pygame.mixer.music.pause()\n\n level.draw(viewport)\n\n # if not done and t > 2.0:\n # level.viewport.target_position += 100.0\n # done = True\n\n mouse_pos = viewport.convert_position_from_screen(np.array(pygame.mouse.get_pos()))\n\n pygame.display.set_caption(\n f\"FPS: {round(clock.get_fps())}\")\n\n pygame.display.flip()\n","repo_name":"anatom3000/SquareGame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"44859689165","text":"from flask import Flask, render_template, request\nfrom sklearn.preprocessing import Binarizer\nimport pickle\n\napp = Flask(__name__)\nmodel = None\n\n@app.route('/')\ndef main():\n return render_template('main.html')\n\n\n@app.route('/fw', methods=['GET','POST'])\ndef fw_predict():\n with open('model_fw.pkl', 'rb') as model_fw:\n model = pickle.load(model_fw)\n\n apperances = request.form['출전한리그경기수']\n goals = request.form['골']\n assist = request.form['도움']\n bcm = request.form['빅찬스미스']\n\n pred = model.predict_proba(\n [[apperances, goals, assist, bcm]]\n )\n y_pred = pred[:, 1].reshape(-1, 1)\n binarizer = Binarizer(threshold=0.2).fit(y_pred)\n custom_pred = binarizer.transform(y_pred)\n return render_template('predict.html', data=custom_pred)\n\n\n@app.route('/mf', methods=['GET','POST'])\ndef mf_predict():\n with open('model_mf.pkl', 'rb') as model_mf:\n model = pickle.load(model_mf)\n\n apperances = request.form['출전한리그경기수']\n goals = request.form['골']\n assist = request.form['도움']\n bcc = request.form['기회창출']\n\n pred = model.predict_proba(\n [[apperances, goals, assist, bcc]]\n )\n y_pred = pred[:, 1].reshape(-1, 1)\n binarizer = Binarizer(threshold=0.2).fit(y_pred)\n custom_pred = binarizer.transform(y_pred)\n return render_template('predict.html', data=custom_pred)\n\n\n@app.route('/df', methods=['GET','POST'])\ndef df_predict():\n with open('model_df.pkl', 'rb') as model_df:\n model = pickle.load(model_df)\n\n apperances = request.form['출전한리그경기수']\n blocks = request.form['슈팅을막은횟수']\n pass_per_match = request.form['경기당패스횟수']\n passes = request.form['패스횟수']\n\n pred = model.predict_proba(\n [[apperances, blocks, 
pass_per_match, passes]]\n )\n y_pred = pred[:, 1].reshape(-1, 1)\n binarizer = Binarizer(threshold=0.85).fit(y_pred)\n custom_pred = binarizer.transform(y_pred)\n return render_template('predict.html', data=custom_pred)\n\n\n@app.route('/gk', methods=['GET','POST'])\ndef gk_predict():\n with open('model_gk.pkl', 'rb') as model_gk:\n model = pickle.load(model_gk)\n\n apperances = request.form['출전한리그경기수']\n saves = request.form['무실점']\n pen_saves = request.form['롱볼']\n high_claims = request.form['자책골']\n\n\n pred = model.predict_proba(\n [[apperances, saves, pen_saves, high_claims]]\n )\n y_pred = pred[:, 1].reshape(-1, 1)\n binarizer = Binarizer(threshold=0.1).fit(y_pred)\n custom_pred = binarizer.transform(y_pred)\n return render_template('predict.html', data=custom_pred)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", debug=True)","repo_name":"TetorCo/FirstMLOpsProject","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"1760043802","text":"\"\"\"\nThe minimum window substring problem \ngiven two substrings s and t , find the value of the substring that is minimum and matches the other\n\"\"\"\n\n#explanation \n\n\"\"\"\n\n\n\"\"\"\n\n\n#input \ns = \"ABSHDASJUKSKSLAAASDF\"\nt = \"ASJU\"\n\n\n","repo_name":"abhishekprakash256/Python","sub_path":"top_coding_questions/minimum_window_substring_problem.py","file_name":"minimum_window_substring_problem.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"73626001399","text":"#!/usr/bin/env python\n\"\"\"\nInstalls yourapp.\n\"\"\"\n\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n\nsetup(\n name=\"wagtail_external_links\",\n version=\"1\",\n description=\"External link formatting for templates\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/kevinhowbrook/wagtail-external-links\",\n author=\"Kevin Howbrook\",\n author_email=\"kbhowbrook@gmail.com\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests*\"]),\n include_package_data=True,\n install_requires=[\"wagtail>=2.4\"],\n)\n","repo_name":"kevinhowbrook/wagtail-external-links","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33949541356","text":"# UVa 540 - Team Queue\n# Queue simulation\nfrom collections import deque\n\n\nclass TeamQueue:\n def __init__(self, num_teams):\n self.order = deque()\n self.q_teams = [deque() for _ in range(num_teams)]\n self.team_dict = {}\n \n def create(self, t, elements):\n for x in elements:\n self.team_dict[x] = t\n\n def enqueue(self, x):\n t = self.team_dict[x]\n self.q_teams[t].append(x)\n if t in set(self.order):\n pass\n else:\n self.order.append(t)\n\n def dequeue(self):\n x = self.q_teams[self.order[0]].popleft()\n if len(self.q_teams[self.order[0]]) == 0:\n self.order.popleft()\n \n return x\n\n\ntc = 1\n\nwhile True:\n num_teams = int(input())\n if num_teams == 0:\n break\n\n print('Scenario #{}'.format(tc))\n\n tq = TeamQueue(num_teams)\n\n for t in range(num_teams):\n inputs = 
input().split(' ')\n        tq.create(t, inputs[1:])\n\n    while True:\n        op = input().split(' ')\n        if op[0] == 'STOP':\n            break\n        elif op[0] == 'ENQUEUE':\n            x = op[1]\n            tq.enqueue(x)\n        else:\n            print(tq.dequeue())\n\n    print()\n    tc += 1\n","repo_name":"pyCERN/UVa_solutions","sub_path":"UVa/100-1999/500-599/540.py","file_name":"540.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16822741748","text":"import unittest\nfrom big_num import find_big_num\n\nclass TestCase(unittest.TestCase):\n    def test_1(self):\n        data = [123, 456]\n        res = find_big_num(data)\n\n        self.assertEqual(res, 654)\n\nif __name__ == '__main__':\n    unittest.main()","repo_name":"teamnamingishard/seungwook_coding_test","sub_path":"연습/테스트연습/test_big_num.py","file_name":"test_big_num.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"29671529529","text":"import tkinter as tk\nimport src.views.textstyles as style\n\n\nclass Connect(tk.Frame):\n    def __init__(self, parent):\n        tk.Frame.__init__(self, parent)\n        self.create_widgets()\n\n    def create_widgets(self):\n        self.spacer = tk.Label(self, height=8, width=26, anchor=tk.E)\n        self.spacer.grid(row=0, column=0)\n\n        self.lbl_ip = tk.Label(self, height=1, text=\"IP address:\",\n                               width=15, justify=\"left\", font=style.label_font, anchor=tk.W)\n        self.lbl_ip.grid(row=1, column=1, sticky=tk.E+tk.W,\n                         padx=10, pady=0, columnspan=1)\n        self.txt_ip = tk.Entry(\n            self, width=36, font=style.entry_font, bg=\"#FFFFFF\")\n        self.txt_ip.grid(row=2, column=1, sticky=tk.E,\n                         padx=10, pady=10, columnspan=1)\n\n        self.btn_connect = tk.Button(self, text=\"Connect\", width=10, height=1, font=style.btn_font,\n                                     activebackground=style.btn_style['actbg'], bg=style.btn_style['bg'], fg=style.btn_style['fg'])\n        self.btn_connect.grid(row=5, column=1, sticky=tk.S +\n                              tk.N, padx=5, pady=5, columnspan=1)\n\n    def get_info(self):\n        ip = self.txt_ip.get()\n        return ip\n\n    def clear_all(self):\n        self.txt_ip.delete(0, tk.END)\n","repo_name":"tienthanh214/Online-Network-Library","sub_path":"Source/client/src/views/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"74868287159","text":"#Reto 1- Area de un triangulo\r\n#A = (b * h) / 2 \r\ndef tipoTriangulo():\r\n    a=input('Ingrese el valor del lado a:')\r\n    b=input('Ingrese el valor del lado b:')\r\n    c=input('Ingrese el valor del lado c:')\r\n    \r\n    if(a==b and b==c):\r\n        print('triángulo Equilatero')\r\n    elif(a!=b and b!=c and a!=c):\r\n        print('triángulo Escaleno')\r\n    else:\r\n        print('triángulo Isósceles')\r\n\r\ndef areaTriangulo():\r\n    base=float(input('Ingrese la base del triángulo: '))\r\n    altura=float(input('Ingrese la altura de un triángulo: '))\r\n    area=(base*altura)/2\r\n    print('El área del triángulo es: ',area)\r\n    \r\nif __name__=='__main__':\r\n    areaTriangulo()\r\n    tipoTriangulo()","repo_name":"cristian3087/dataAcademy","sub_path":"reto1.py","file_name":"reto1.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"37817642994","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nModule that contains functions related to Maya clusters\n\"\"\"\n\nimport maya.cmds\n\nfrom tp.core import log\nfrom tp.common.math import vec3\nfrom tp.maya.cmds import name as name_lib, shape as shape_utils\n\nLOGGER = log.tpLogger\n\n\nclass ClusterObject(object):\n    \"\"\"\n    Util class for clustering objects\n    \"\"\"\n\n    def __init__(self, geometry, name):\n        super(ClusterObject, self).__init__()\n\n        self._geo = geometry\n        self._join_ends = False\n        self._name = name\n        self._cvs = list()\n        self._cv_count = 0\n        self._clusters = list()\n        self._handles = list()\n\n    def create(self):\n        \"\"\"\n        Creates the clusters\n        \"\"\"\n\n        self._create()\n\n    def get_cluster_list(self):\n        \"\"\"\n        Returns the names of cluster deformers\n        :return: list\n        \"\"\"\n\n        return self._clusters\n\n    def get_cluster_handle_list(self):\n        \"\"\"\n        Returns the name of cluster handles\n        :return: list\n        \"\"\"\n\n        return self._handles\n\n    def _create(self):\n        \"\"\"\n        Internal function that creates the cluster\n        Override in child classes\n        \"\"\"\n\n        return\n\n    def _create_cluster(self, cvs):\n        return create_cluster(cvs, self._name)\n\n\nclass ClusterSurface(ClusterObject, object):\n    \"\"\"\n    Util class for clustering a surface\n    \"\"\"\n\n    def __init__(self, geometry, name):\n        super(ClusterSurface, self).__init__(geometry, name)\n\n        self._join_ends = False\n        self._join_both_ends = False\n        self._first_cluster_pivot_at_start = True\n        self._last_cluster_pivot_at_end = True\n        self._maya_type = None\n\n        if shape_utils.has_shape_of_type(self._geo, 'nurbsCurve'):\n            self._maya_type = 'nurbsCurve'\n        elif shape_utils.has_shape_of_type(self._geo, 'nurbsSurface'):\n            self._maya_type = 'nurbsSurface'\n\n        self._cluster_u = True\n\n    def _create(self):\n        self._cvs = maya.cmds.ls('{}.cv[*]'.format(self._geo), flatten=True)\n        if self._maya_type == 'nurbsCurve':\n            self._cv_count = len(self._cvs)\n        elif self._maya_type == 'nurbsSurface':\n            if self._cluster_u:\n                index = '[0][*]'\n            else:\n                index = '[*][0]'\n\n            self._cv_count = len(maya.cmds.ls('{}.cv{}'.format(self._geo, index), flatten=True))\n\n        start_index = 0\n        cv_count = self._cv_count\n        if self._join_ends:\n            if self._join_both_ends:\n                self._create_start_and_end_joined_cluster()\n            else:\n                last_cluster, last_handle = self._create_start_and_end_clusters()\n            cv_count = len(self._cvs[2:self._cv_count])\n            start_index = 2\n\n        for i in range(start_index, cv_count):\n            if self._maya_type == 'nurbsCurve':\n                cv = '{}.cv[{}]'.format(self._geo, i)\n            elif self._maya_type == 'nurbsSurface':\n                if self._cluster_u:\n                    index = '[*][{}]'.format(i)\n                else:\n                    index = '[{}][*]'.format(i)\n                cv = '{}.cv{}'.format(self._geo, index)\n            else:\n                LOGGER.warning('Given NURBS Maya type \"{}\" is not valid!'.format(self._maya_type))\n                return\n\n            cluster, handle = self._create_cluster(cv)\n            self._clusters.append(cluster)\n            self._handles.append(handle)\n\n        if self._join_ends and not self._join_both_ends:\n            self._clusters.append(last_cluster)\n            self._handles.append(last_handle)\n\n        return self._clusters\n\n    def set_join_ends(self, flag):\n        \"\"\"\n        Sets whether clusters on the end of the surface take up 2 CVs or 1 CV\n        :param flag: bool\n        \"\"\"\n\n        self._join_ends = flag\n\n    def set_join_both_ends(self, flag):\n        \"\"\"\n        Sets whether clusters on the ends of the surface are joined together or not\n        :param flag: bool\n        \"\"\"\n\n        self._join_both_ends = flag\n\n    def set_last_cluster_pivot_at_end(self, flag):\n        \"\"\"\n        Sets whether to move the last cluster pivot to the end 
of the curve\n        :param flag: bool\n        \"\"\"\n\n        self._last_cluster_pivot_at_end = flag\n\n    def set_first_cluster_pivot_at_start(self, flag):\n        \"\"\"\n        Sets whether to move the first cluster pivot to the start of the curve\n        :param flag: bool\n        \"\"\"\n\n        self._first_cluster_pivot_at_start = flag\n\n    def set_cluster_u(self, flag):\n        \"\"\"\n        Sets whether cluster u should be used\n        :param flag: bool\n        \"\"\"\n\n        self._cluster_u = flag\n\n    def _create_start_and_end_clusters(self):\n        \"\"\"\n        Internal function used to create start and end clusters\n        \"\"\"\n\n        start_cvs = None\n        end_cvs = None\n        start_pos = None\n        end_pos = None\n\n        if self._maya_type == 'nurbsCurve':\n            start_cvs = '{}.cv[0:1]'.format(self._geo)\n            end_cvs = '{}.cv[{}:{}]'.format(self._geo, self._cv_count - 2, self._cv_count - 1)\n            start_pos = maya.cmds.xform('{}.cv[0]'.format(self._geo), q=True, ws=True, t=True)\n            end_pos = maya.cmds.xform('{}.cv[{}]'.format(self._geo, self._cv_count - 1), q=True, ws=True, t=True)\n        elif self._maya_type == 'nurbsSurface':\n            if self._cluster_u:\n                cv_count_u = len(maya.cmds.ls('{}.cv[*][0]'.format(self._geo), flatten=True))\n                index1 = '[0:*][0:1]'\n                index2 = '[0:*][{}:{}]'.format(self._cv_count - 2, self._cv_count - 1)\n                index3 = '[{}][0]'.format(cv_count_u - 1)\n                index4 = '[0][{}]'.format(self._cv_count - 1)\n                index5 = '[{}][{}]'.format(cv_count_u, self._cv_count - 1)\n            else:\n                cv_count_v = len(maya.cmds.ls('%s.cv[0][*]' % self._geo, flatten=True))\n                index1 = '[0:1][0:*]'\n                index2 = '[{}:{}][0:*]'.format(self._cv_count - 2, self._cv_count - 1)\n                index3 = '[0][{}]'.format(cv_count_v - 1)\n                index4 = '[{}][0]'.format(self._cv_count - 1)\n                index5 = '[{}][{}]'.format(self._cv_count - 1, cv_count_v)\n\n            start_cvs = '{}.cv{}'.format(self._geo, index1)\n            end_cvs = '{}.cv{}'.format(self._geo, index2)\n\n            p1 = maya.cmds.xform('{}.cv[0][0]'.format(self._geo), q=True, ws=True, t=True)\n            p2 = maya.cmds.xform('{}.cv{}'.format(self._geo, index3), q=True, ws=True, t=True)\n            start_pos = vec3.get_mid_point(p1, p2)\n\n            p1 = maya.cmds.xform('{}.cv{}'.format(self._geo, index4), q=True, ws=True, t=True)\n            p2 = maya.cmds.xform('{}.cv{}'.format(self._geo, index5), q=True, ws=True, t=True)\n            end_pos = vec3.get_mid_point(p1, p2)\n\n        cluster, handle = self._create_cluster(start_cvs)\n\n        self._clusters.append(cluster)\n        self._handles.append(handle)\n\n        if self._first_cluster_pivot_at_start:\n            maya.cmds.xform(handle, ws=True, rp=start_pos, sp=start_pos)\n\n        last_cluster, last_handle = self._create_cluster(end_cvs)\n        if self._last_cluster_pivot_at_end:\n            maya.cmds.xform(last_handle, ws=True, rp=end_pos, sp=end_pos)\n\n        return last_cluster, last_handle\n\n    def _create_start_and_end_joined_cluster(self):\n        start_cvs = None\n        end_cvs = None\n\n        if self._maya_type == 'nurbsCurve':\n            start_cvs = '{}.cv[0:1]'.format(self._geo)\n            end_cvs = '{}.cv[{}:{}]'.format(self._geo, self._cv_count - 2, self._cv_count - 1)\n        elif self._maya_type == 'nurbsSurface':\n            if self._cluster_u:\n                index_1 = '[0:*][0]'\n                index_2 = '[0:*][{}]'.format(self._cv_count - 1)\n            else:\n                index_1 = '[0][0:*]'\n                index_2 = '[{}][0:*]'.format(self._cv_count - 1)\n\n            start_cvs = '{}.cv{}'.format(self._geo, index_1)\n            end_cvs = '{}.cv{}'.format(self._geo, index_2)\n\n        maya.cmds.select([start_cvs, end_cvs])\n        cvs = maya.cmds.ls(sl=True)\n\n        cluster, handle = self._create_cluster(cvs)\n        self._clusters.append(cluster)\n        self._handles.append(handle)\n\n\nclass ClusterCurve(ClusterSurface, object):\n    \"\"\"\n    Util class for clustering a curve\n    \"\"\"\n\n    def _create(self):\n        
self._cvs = maya.cmds.ls('{}.cv[*]'.format(self._geo), flatten=True)\n self._cv_count = len(self._cvs)\n start_index = 0\n cv_count = self._cv_count\n\n if self._join_ends:\n last_cluster, last_handle = self._create_start_and_end_clusters()\n cv_count = len(self._cvs[2:self._cv_count])\n start_index = 2\n\n for i in range(start_index, cv_count):\n cluster, handle = self._create_cluster('{}.cv[{}]'.format(self._geo, i))\n self._clusters.append(cluster)\n self._handles.append(handle)\n\n if self._join_ends:\n self._clusters.append(last_cluster)\n self._handles.append(last_handle)\n\n return self._clusters\n\n def _create_start_and_end_clusters(self):\n cluster, handle = self._create_cluster('{}.cv[0:1]'.format(self._geo))\n\n self._clusters.append(cluster)\n self._handles.append(handle)\n\n pos = maya.cmds.xform('{}.cv[0]'.format(self._geo), q=True, ws=True, t=True)\n maya.cmds.xform(handle, ws=True, rp=pos, sp=pos)\n\n last_cluster, last_handle = self._create_cluster(\n '{}.cv[{}:{}]'.format(self._geo, self._cv_count - 2, self._cv_count - 1))\n pos = maya.cmds.xform('{}.cv[{}]'.format(self._geo, self._cv_count - 1), q=True, ws=True, t=True)\n maya.cmds.xform(last_handle, ws=True, rp=pos, sp=pos)\n\n return last_cluster, last_handle\n\n def set_cluster_u(self, flag):\n \"\"\"\n Override because cluster u is not available on curves\n :param flag: bool\n \"\"\"\n\n LOGGER.warning('Cannot set cluster U, there is only one direction for spans on a curve.')\n\n\ndef create_cluster(points, name, relative=False, front_of_chain=True, exclusive=False):\n \"\"\"\n Creates a cluster on the given points\n :param points: list, names of points to cluster\n :param name: str, name of the cluster\n :param relative: bool, sets whether or not cluster is created in relative mode. In this mode, only the\n transformations directly above the cluster are used by the cluster.\n :param front_of_chain: bool\n :param exclusive: bool, Whether or not cluster deformation set is put in a deform partition. If True, a vertex/CV\n only will be able to be deformed by one cluster.\n :return: list(str, str), [cluster, handle]\n \"\"\"\n\n # NOTE: Bug detected in Maya 2019. 
If we pass exclusive argument, no matter if we pass True or False, exclusivity\n    # will be enabled\n    if exclusive:\n        cluster, handle = maya.cmds.cluster(\n            points, n=name_lib.find_unique_name(name), relative=relative, frontOfChain=front_of_chain, exclusive=True)\n    else:\n        cluster, handle = maya.cmds.cluster(\n            points, n=name_lib.find_unique_name(name), relative=relative, frontOfChain=front_of_chain)\n    return cluster, handle\n","repo_name":"tpoveda/tp-dcc-tools","sub_path":"packages/tp-dcc-maya/tp/maya/cmds/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":11268,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"25500633418","text":"import requests\n\nclass FetchBookInfo:\n    def __init__(self, ttb):\n        self.ttb = ttb\n        self.api_url = \"http://www.aladin.co.kr/ttb/api/ItemLookUp.aspx\"\n\n    def request_params(self):\n        return {\n            \"TTBKey\": self.ttb,\n            \"ItemIdType\": \"Isbn\",\n            \"Output\": \"JS\",\n            \"OptResult\": \"usedList\",\n            \"Version\": \"20131101\"\n        }\n\n    def fetch(self, isbn):\n        params = self.request_params()\n        params[\"ItemId\"] = str(isbn)\n\n        return requests.get(self.api_url, params=params).json()\n","repo_name":"gptjddldi/aladin","sub_path":"fetcher.py","file_name":"fetcher.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11116732580","text":"from my_dict import *\nfrom time import process_time\nimport matplotlib.pyplot as plt\n\npath = 'C:/Users/alena/Desktop/BMSTU_5sem_analysis_of_algorithms/lab7/report/inc/img/'\ntime_file = path + 'times2.txt'\nfilename_all = path + 'time_all2.png'\nn_repeats = [3,]\nns = [100, 1000, 5000, 10000, 40000]\n\n\ndef find_all_keys(my_dict, alg, exist, keys):\n    for key in keys:\n        if not exist:\n            key += '&'\n        value = alg(my_dict, key)\n        if exist != (str(value) != '-1'):\n            print('err')\n\n\ndef count_time_for_alg(my_dict, alg, keys):\n    exist_list = []\n    not_exist_list = []\n    for n in n_repeats:\n        print(n)\n        start = process_time()\n        for _ in range(n):\n            find_all_keys(my_dict, alg, True, keys)\n        end = process_time()\n        exist_list.append(end-start)\n\n        start = process_time()\n        for _ in range(n):\n            find_all_keys(my_dict, alg, False, keys)\n        end = process_time()\n        not_exist_list.append(end-start)\n\n    return exist_list, not_exist_list\n\n\ndef cmp_time():\n    efss = []\n    nfss = []\n    ebss = []\n    nbss = []\n    esss = []\n    nsss = []\n    for n in ns:\n        enrus_dict = load_data(n)\n        keys = enrus_dict.keys()\n\n        efs, nfs = count_time_for_alg(enrus_dict, full_search, keys)\n\n        sorted_enrus_dict = sort_by_keys(enrus_dict)\n        ebs, nbs = count_time_for_alg(sorted_enrus_dict, binary_search, keys)\n\n        segmentated_enrus_dict = segmentate(enrus_dict)\n        ess, nss = count_time_for_alg(segmentated_enrus_dict, segment_search, keys)\n\n        if len(efs + nfs + ebs + nbs + ess + nss) > 6:\n            print('warning: unexpected number of timing results')\n        efss.append(efs[0]/n_repeats[0])\n        nfss.append(nfs[0]/n_repeats[0])\n        ebss.append(ebs[0]/n_repeats[0])\n        nbss.append(nbs[0]/n_repeats[0])\n        esss.append(ess[0]/n_repeats[0])\n        nsss.append(nss[0]/n_repeats[0])\n\n    with open(time_file, 'w') as f:\n        f.write(' '.join(list(map(str, ns))) + '\\n')\n        f.write(' '.join(list(map(str, efss))) + '\\n')\n        f.write(' '.join(list(map(str, nfss))) + '\\n')\n        f.write(' '.join(list(map(str, ebss))) + '\\n')\n        f.write(' '.join(list(map(str, nbss))) + '\\n')\n        f.write(' '.join(list(map(str, esss))) + '\\n')\n        f.write(' '.join(list(map(str, nsss))) + 
'\\n')\n\n\ndef draw_plot_all():\n with open(time_file, 'r') as f:\n ns = list(map(float, f.readline().split()))\n efs = list(map(float, f.readline().split()))\n nfs = list(map(float, f.readline().split()))\n ebs = list(map(float, f.readline().split()))\n nbs = list(map(float, f.readline().split()))\n ess = list(map(float, f.readline().split()))\n nss = list(map(float, f.readline().split()))\n\n plt.xlabel('Количество элементов в словаре')\n plt.xticks(ns)\n # plt.figure(figsize=(7, 10))\n plt.ylabel('Время работы реализации (с)')\n plt.grid()\n\n plt.plot(ns, efs, label='full_e', linestyle='-.', color='green')\n plt.plot(ns, nfs, label='full_n', color='green')\n\n plt.plot(ns, ebs, label='binary_e', linestyle='-.', color='blue')\n plt.plot(ns, nbs, label='binary_n', color='blue')\n\n plt.plot(ns, ess, label='segment_e', linestyle='-.', color='red')\n plt.plot(ns, nss, label='segment_n', color='red')\n\n plt.legend(loc='best')\n plt.savefig(filename_all)\n\n\nif __name__ == \"__main__\":\n cmp_time()\n draw_plot_all()\n\n\n","repo_name":"alena-zayts/BMSTU_5sem_analysis_of_algorithms","sub_path":"lab7/src/time_compare.py","file_name":"time_compare.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"75004496120","text":"#import modules\nfrom datetime import datetime\nfrom scipy import spatial\nimport glob, os, sys, math\nimport settings_CV as sett, numpy as np\n\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#initialize global variables\nsett.init()\n\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#parameters\nsett.p1 = sys.argv[1]\t\t\t\t#fold\np = int(sys.argv[2]) / 1000.0\t\t#parameter p\n\nsett.p2 = 0.2959848493588502 \t#bandwidth\nsett.p3 = 0\n\nsett.dir1 = 'CV_decomp/fold_' + sett.p1\nsett.dir2 = 'CV_result' \n\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#create directories\nif not os.path.exists(sett.dir1):\n os.makedirs(sett.dir1)\nif not os.path.exists(sett.dir2):\n os.makedirs(sett.dir2)\n\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#distance function:\n#x1: x-coordinate data point (known location), y1: y-coordinate data point (known location), t1: t-coordinate data point (known location)\n#x2: x-coordinate regular grid point (unknown location), y2: y-coordinate regular grid point (unknown location), t2: t-coordinate regular grid point (unknown location)\n#c: scaling factor space-time\ndef Distance3D(x1, y1, t1, x2, y2, t2):\n dx = x2 - x1\n dy = y2 - y1\n ti = t2 - t1\n dit = (dx**2 + dy**2 + ti**2) ** 0.5\n return dit\n\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#open output file \noutFile = open(sett.dir2 + os.sep + \"st_idw_\" + str(p) + \"_\" + str(sett.p1) + 
\".txt\" , \"w\") # Open output file\n\n#performance menasures: Sum of Squared Differences (SSD), Mean Absolute Relative Error (MARE), Root Mean Square Percentage Error (RMSPE) \nSSD = MARE = RMSPE = 0\n\n# loop through all subdomains, compute performance measures\nfor trData, teData in zip(sorted(glob.glob(sett.dir1 + os.sep + \"ptr_*\")), sorted(glob.glob(sett.dir1 + os.sep + \"pte_*\"))): # Loop through all subdomains\n\n #open point files\n trFile = open(trData, \"r\")\n teFile = open(teData, \"r\")\n\n #header: boundaries\n r = trFile.readline().split(\",\")\n e = teFile.readline()\n xmin, xmax, ymin, ymax, zmin, zmax = float(r[0]), float(r[1]), float(r[2]), float(r[3]), float(r[4]), float(r[5].strip())\n \n #read training data\n trX, trY, trZ, trP = [], [], [], []\t\n for record in trFile: \n trX.append(float(record.split(\",\")[0]))\n trY.append(float(record.split(\",\")[1]))\n trZ.append(float(record.split(\",\")[2]))\n trP.append(float(record.split(\",\")[3]))\n trFile.close()\n\n tr_xyzp = zip(trX, trY, trZ, trP)\n tr_xyz = zip(trX, trY, trZ)\n\n #read test data\n te_xyz, te_xyzp = [], []\n teX, teY, teZ, teP = [], [], [], []\n for record in teFile: \t\t\n teX = float(record.split(\",\")[0])\n teY = float(record.split(\",\")[1])\n teZ = float(record.split(\",\")[2])\n teP = float(record.split(\",\")[3])\n te_xyz.append([teX, teY, teZ])\n te_xyzp.append([teX, teY, teZ, teP])\n\n teFile.close()\n\n i = 0\n\n while i < len(te_xyz):\t#for each test point\n\n xC, yC, zC = te_xyz[i][0], te_xyz[i][1], te_xyz[i][2]\n\n distanceSum = 0.0\n productSum = 0.0\n\n j = 0\n while j < len(tr_xyz): \t#for each training point \n \n distance = Distance3D(tr_xyzp[j][0], tr_xyzp[j][1], tr_xyzp[j][2], xC, yC, zC) #calculating 3D distance between voxel and known data points\n \n if distance < sett.p2:\n\n distancePower = (1.0 / distance) ** p\t#power parameter p\n\n distanceSum += distancePower\n productSum += (distancePower) * tr_xyzp[j][3] \n \n j += 1 \n \n if distanceSum > 0.0:\n estimate = productSum / distanceSum\n else:\n estimate = 0.0\n\n #keep a running sum of squared differences between estimate and observed value\n SSD += (estimate - te_xyzp[i][3]) ** 2 \t#sum of squared differences\n MARE += abs(estimate - te_xyzp[i][3]) / te_xyzp[i][3]\n RMSPE += MARE ** 2\n \n i = i + 1\n \noutFile.write(str(SSD) + \",\" + str(MARE) + \",\" + str(RMSPE))\noutFile.close()\n\n","repo_name":"alexandster/interpolateSpaceTime","sub_path":"CV.py","file_name":"CV.py","file_ext":"py","file_size_in_byte":4849,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"32720738589","text":"class Solution:\n def minRemoveToMakeValid(self, s: str) -> str:\n sList = list(s)\n opened = 0\n \n #lets convert our string into a list and only keep track of opened\n #wherever opened goes negative, we add a blank \"\" into the list which will classify as a removal\n for index, char in enumerate(sList):\n if char == \"(\":\n opened +=1\n elif char == \")\":\n opened -=1\n \n if opened < 0:\n sList[index] = \"\"\n opened +=1\n \n #if we have opened > 0, then we need to start removing some ( from the end\n if opened > 0:\n for index in range(len(sList)-1,-1,-1):\n if sList[index] == \"(\":\n sList[index] = \"\"\n opened -=1\n if opened == 0: break\n \n return 
\"\".join(sList)","repo_name":"vinija/LeetCode","sub_path":"1249-minimum-remove-to-make-valid-parentheses/1249-minimum-remove-to-make-valid-parentheses.py","file_name":"1249-minimum-remove-to-make-valid-parentheses.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"40"} +{"seq_id":"72555874360","text":"import ctypes\nosm = ctypes.CDLL(\"libOSMesa.so\", ctypes.RTLD_GLOBAL)\nfrom vtk import (vtkSphereSource, vtkPolyDataMapper, vtkActor, vtkRenderer,\n vtkRenderWindow, vtkWindowToImageFilter, vtkPNGWriter)\n\nsphereSource = vtkSphereSource()\nmapper = vtkPolyDataMapper()\nmapper.SetInputConnection(sphereSource.GetOutputPort())\n\nactor = vtkActor()\nactor.SetMapper(mapper)\n\nrenderer = vtkRenderer()\nrenderWindow = vtkRenderWindow()\nrenderWindow.SetOffScreenRendering(1)\nrenderWindow.AddRenderer(renderer)\n\nrenderer.AddActor(actor)\nrenderer.SetBackground(1, 1, 1)\n\nrenderWindow.Render()\n\nwindowToImageFilter = vtkWindowToImageFilter()\nwindowToImageFilter.SetInput(renderWindow)\nwindowToImageFilter.Update()\n\nwriter = vtkPNGWriter()\nwriter.SetFileName(\"sphere.png\")\nwriter.SetInputConnection(windowToImageFilter.GetOutputPort())\nwriter.Write()","repo_name":"CompuCell3D/CompuCell3D","sub_path":"cc3d/experimental/offscreen-test.py","file_name":"offscreen-test.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"40"} +{"seq_id":"28733943614","text":"def register(self, what, obj):\n \"\"\"\n Registering a plugin\n\n Params\n ------\n what: Nature of the plugin (backend, instrumentation, repo)\n obj: Instance of the plugin\n \"\"\"\n # print(\"Registering pattern\", name, pattern)\n name = obj.name\n version = obj.version\n enable = obj.enable\n if enable == 'n':\n return\n\n key = Key(name, version)\n self.plugins[what][key] = obj","repo_name":"MichaelFu1998-create/security_scanning","sub_path":"codesearchnet/codesearchnet_20232.py","file_name":"codesearchnet_20232.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"75035298379","text":"import datetime\nfrom apiclient import discovery\nimport template\nimport config\nfrom sendMail import mailer\n\n\nif __name__ == '__main__':\n\n service = discovery.build('calendar', 'v3', developerKey= config.conf['apiKey'] )\n\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\n endDate = datetime.datetime.utcnow() + datetime.timedelta(days=7)\n endDate = endDate.isoformat() + 'Z'\n\n print ('Getting the upcoming events from ', now , 'to ', endDate )\n eventsResult = service.events().list(\n calendarId=config.conf['calendarId'], timeMin=now,timeMax=endDate, singleEvents=True,\n orderBy='startTime').execute()\n events = eventsResult.get('items', [])\n print (\"Number of events found : \", len(events))\n\n mailMessage = template.mailTemplate(events)\n newMail= mailer(\"Nädala sõnumid\", mailMessage.newMessage())\n newMail.SendMail()\n\n\n\n\n\n\n","repo_name":"kolgas/googleCalNewsletter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"2957427433","text":"import matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport datetime\nimport shutil\nimport numpy as np\n\n#-- Define the file name\ncfFileName = 
'ugly_log.txt'\ncvFileName = 'cv_log.txt'\n\n#with open(cfFileName) as file:\n# next(file)\n\ntime_cf, x, y, z, roll, pitch, yaw, height = np.loadtxt(cfFileName, delimiter=',', unpack=True)\ntime_cv, posx, posy, posz, cvroll, cvpitch, cvyaw, id2follow = np.loadtxt(cvFileName, delimiter=',', unpack=True) \nti = np.arange(len(time_cv))\n\ntime_cf = [x-time_cf[0] for x in time_cf]\nheight = [x*0.001 for x in height]\nyaw = [-1*x+cvyaw[0] for x in yaw]\n\nplt.scatter(posx,posy,c=ti, cmap=cm.hsv_r)\nplt.show()\n\nrows, cols = 4, 2\n\nfig, axs = plt.subplots(rows, cols, sharex=True)\nfig.suptitle('Log data')\n\n#-- First column [:][0]\naxs[0][0].set_title('X')\n#axs[0][0].plot(time_cf, x, 'tab:red')\naxs[0][0].plot(time_cv, posx, 'tab:green')\n\naxs[1][0].set_title('Y')\n#axs[1][0].plot(time_cf, y, 'tab:red')\naxs[1][0].plot(time_cv, posy, 'tab:green')\n\naxs[2][0].set_title('Z')\naxs[2][0].plot(time_cf, height, 'tab:red')\naxs[2][0].plot(time_cv, posz, 'tab:green')\naxs[2][0].plot(time_cf, z, 'tab:blue')\n#axs[0][0].legend()\n\naxs[3][0].set_title('id2follow')\naxs[3][0].plot(time_cv, id2follow, 'tab:blue')\n\n#-- Second column [:][1]\naxs[0][1].set_title('Roll')\naxs[0][1].plot(time_cv, -cvpitch, 'tab:green')\naxs[0][1].plot(time_cf, roll, 'tab:red')\n\naxs[1][1].set_title('Pitch')\naxs[1][1].plot(time_cv, cvroll, 'tab:green')\naxs[1][1].plot(time_cf, pitch, 'tab:red')\n\naxs[2][1].set_title('Yaw')\naxs[2][1].plot(time_cv, cvyaw, 'tab:green')\naxs[2][1].plot(time_cf, yaw, 'tab:red')\n\naxs[3][1].set_title('id2follow')\naxs[3][1].plot(time_cv, id2follow, 'tab:blue')\n\n#-- Third column [:][2]\n# axs[0][2].set_title('u_x')\n# axs[0][2].plot(time_cf, vx, 'tab:blue')\n\n# axs[1][2].set_title('u_y')\n# axs[1][2].plot(time_cf, vy, 'tab:blue')\n\n# axs[3][2].set_title('id2follow')\n# axs[3][2].plot(time_cv, id2follow, 'tab:blue')\n\n#plt.xlabel('time')\n#plt.ylabel('Roll')\n#plt.title('Interesting Graph\\nCheck it out')\n#plt.legend()\n\n#-- Save figure as png\nnow = datetime.datetime.now()\ndate = now.strftime(\"%Y-%m-%d_%H%M\")\nplt.savefig('figures/uglyPlotter_'+date+'.png')\n\n#-- Copy textfile as backup with same name\nshutil.copy(cfFileName,'figures/cf_log'+date+'.txt')\nshutil.copy(cvFileName,'figures/cv_log'+date+'.txt')\n\nplt.show()\n\n# with open('logFile.txt', 'r') as infile, open('logFileNew.txt', 'w') as outfile:\n# #temp = infile.read().replace(':', \"\") #'[^a-zA-Z0-9_]'\n# temp = infile.read().replace(\"'\", \"\")\n# temp = temp.replace(\":\", \"\")\n# temp = temp.replace(\"{\", \"\")\n# temp = temp.replace(\"}\", \"\")\n# temp = temp.replace(\"stabilizer.roll \", \"\")\n# temp = temp.replace(\"stabilizer.pitch \", \"\")\n# temp = temp.replace(\"stabilizer.yaw \", \"\")\n# #temp = infile.read().replace(\":\", \"\")\n# outfile.write(temp)\n\n# with open('logz.txt', 'r') as infile, open('logznew.txt', 'w') as outfile:\n# #temp = infile.read().replace(':', \"\") #'[^a-zA-Z0-9_]'\n# temp = infile.read().replace(\"'\", \"\")\n# temp = temp.replace(\":\", \"\")\n# temp = temp.replace(\"{\", \"\")\n# temp = temp.replace(\"}\", \"\")\n# temp = temp.replace(\"range.zrange \", \"\")\n# outfile.write(temp)","repo_name":"agrensimon/ugly-dockling","sub_path":"uglyPlotter.py","file_name":"uglyPlotter.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"32738944814","text":"# -*- coding: utf-8 -*-\n# @author : yuchangqing\n# @date : 2021/5/26\n# @File : shanghairanking.py\n# @Software : PyCharm\n# 
SHA256\n\nimport hashlib\n\nimport ctypes\n\n\ndef int_overflow(val):\n maxint = 2147483647\n if not -maxint - 1 <= val <= maxint:\n val = (val + (maxint + 1)) % (2 * (maxint + 1)) - maxint - 1\n return val\n\n\ndef unsigned_right_shift(n, i):\n # 数字小于0,则转为32位无符号uint\n if n < 0:\n n = ctypes.c_uint32(n).value\n # 正常位移位数是为正数,但是为了兼容js之类的,负数就右移变成左移好了\n if i < 0:\n return -int_overflow(n << abs(i))\n # print(n)\n return int_overflow(n >> i)\n\n\ndef _parse(string_code: str):\n i = 0\n n = {}\n while i < len(string_code):\n if not n.get(unsigned_right_shift(i, 2)):\n n[unsigned_right_shift(i, 2)] = 0\n n[unsigned_right_shift(i, 2)] |= (255 & ord(string_code[i])) << 24 - i % 4 * 8\n i += 1\n return n, n.values(), len(string_code)\n\n\ndef _x(t, i):\n _index = unsigned_right_shift(i, 2)\n try:\n val = t[_index]\n except:\n val = 0\n finally_val = unsigned_right_shift(val, 24 - i % 4 * 8)\n return finally_val\n\n\ndef stringify(string_code: str):\n t, _t, n = _parse(string_code)\n _map = \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=\"\n r = []\n i = 0\n while i < n:\n c = 0\n while c < 4 and i + 0.75 * c < n:\n o = (_x(t, i) & 255) << 16 | (_x(t, i + 1) & 255) << 8 | _x(t, i + 2) & 255\n r.append(_map[unsigned_right_shift(o, 6 * (3 - c)) & 63])\n c += 1\n i += 3\n l = _map[64]\n if l:\n r.append(l)\n return \"\".join(r)\n\n\nif __name__ == \"__main__\":\n s = hashlib.sha256() # Get the hash algorithm.\n s.update(\n \"3#67611e7d-9144-4893-a737-0ca577012646#GET /v2010/user_ident #1621938561740\".encode(\"utf8\")) # Hash the data.\n b = s.hexdigest() # Get he hash value.\n # print(b)\n print(stringify(\"3:51d67f75787cea608105bca60c915a1311db25eba7c9edec126ce2bc585482d1:1621938561740\"))\n\n\"51d67f75787cea608105bca60c915a1311db25eba7c9edec126ce2bc585482d1\"\n\n\"3:51d67f75787cea608105bca60c915a1311db25eba7c9edec126ce2bc585482d1:1621938561740\"\n","repo_name":"kingking888/js","sub_path":"逆向/网页实战/软科学排名headers参数加密/shanghairanking.py","file_name":"shanghairanking.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"70041156620","text":"# from os import device_encoding\nimport sys\ninput = sys.stdin.readline\n\ndef cal(count, result, plus, minus, mulitiply, divide):\n global max_result\n global min_result\n\n if count == n:\n max_result = max(max_result, result)\n min_result = min(min_result, result)\n return\n \n else:\n if plus:\n cal(count+1, result + nums[count], plus - 1, minus, mulitiply, divide)\n if minus:\n cal(count+1, result - nums[count], plus, minus - 1, mulitiply, divide)\n if mulitiply:\n cal(count+1, result * nums[count], plus, minus, mulitiply - 1, divide)\n if divide: # 추가해야할 조건) 음수를 양수로 나눌 땐 양수로 바꿔서 계산한 후 나온 몫을 음수로 바꾼다\n cal(count+1, -(-result // nums[count]) if result < 0 else result // nums[count], plus, minus, mulitiply, divide - 1)\n \n\n # +가 2개 올 수도 있는데? 
-> plus 라는게 들어오는지 확인하려면?\n \n\nif __name__ == '__main__':\n \n n = int(input())\n nums = list(map(int, input().split())) # 계산할 숫자\n calculate = list(map(int, input().split()))\n max_result = float('-inf')\n min_result = float('inf')\n # result = []\n # nums(x) nums[0] ?\n cal(1, nums[0], calculate[0], calculate[1], calculate[2], calculate[3])\n\n print(max_result)\n print(min_result) \n","repo_name":"Yerimi11/sw_jungle_week03","sub_path":"yerim/DFS/14888_연산자끼워넣기.py","file_name":"14888_연산자끼워넣기.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"72536603018","text":"# get raw property data\n\n# general\nimport pandas as pd\nimport time\n\n# requests\nimport requests\n\nif __name__ == \"__main__\":\n\n\t# get requests session\n\tsession = requests.Session()\n\tsession.headers.update({'User-Agent':\n\t 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/53' +\n\t '7.36 (KHTML, like Gecko) Chrome/58.0.3029.110 ' +\n\t 'Safari/537.36'\n\t })\n\n\t# get object IDs for all CIR and CT properties\n\tstr_request = \"https://dcimapapps.countyofdane.com/arcgissrv/rest/services/ParcelLayers/MapServer/3/query?where=PropertyStreetTyp%20%3D%20%27CIR%27%20OR%20PropertyStreetTyp%20%3D%20%27CT%27&outFields=*&returnGeometry=false&returnIdsOnly=true&outSR=4326&f=json\"\n\tr = session.get(str_request)\n\tls_object_ids = r.json()['objectIds']\n\n\t# determine number of items to collect\n\t# and estimate runtime\n\n\tprint(\"len: \" + str(len(ls_object_ids)))\n\tprint(\"estimated download time: \" + str(round(len(ls_object_ids)/60,0)) + \" min\")\n\tprint()\n\n\t# to launch or not launch?\n\n\tstr_launch_code = \"download\"\n\tstr_launch_input = input(\"Enter the launch code to proceed: \")\n\n\tif str_launch_input == str_launch_code:\n\n\t # download loop\n\t for str_object_id in ls_object_ids:\n\n\t str_request = \"https://dcimapapps.countyofdane.com/arcgissrv/rest/services/ParcelLayers/MapServer/3/query?where=OBJECTID%20=%20\" + str(str_object_id).zfill(5) + \"&outFields=*&outSR=4326&f=json\"\n\t r = session.get(str_request)\n\n\t dict_features = r.json()['features'][0]['attributes'] # since we searched for a single object ID, there's only one item in the list hence 0\n\n\t # https://stackoverflow.com/questions/9390126/pythonic-way-to-check-if-something-exists\n\t if 'df_results' not in locals() and 'df_results' not in globals():\n\t # df doesn't exist yet, so create it\n\t df_results = pd.DataFrame(dict_features, index=[0]) # idc about the index\n\t else:\n\t # df does exist, so append\n\n\t # convert to dataframe\n\t df_append = pd.DataFrame(dict_features, index=[0]) # idc about the index\n\t df_results = df_results.append(df_append, ignore_index=True)\n\n\t # sleep\n\t time.sleep(1)\n\n\t # save to CSV\n\t df_results.to_csv(\"../data/madison_raw.csv\")\n\n\t# Example of Response\n\n\t# dict_features = r.json()['features'][0]['attributes'] # since we searched for a single object ID, there's only one item in the list hence 0\n\n\t# # e.g.,\n\t# # dict_features = {'OBJECTID': 5653,\n\t# # 'PARCELNO': '050903411541',\n\t# # 'CurrentParcel': 'Active',\n\t# # 'Owner': 'HIGHLANDS OF NETHERWOOD LLC',\n\t# # 'CoOwner': '',\n\t# # 'ConctOwner': 'HIGHLANDS OF NETHERWOOD LLC',\n\t# # 'Attention': '',\n\t# # 'PropertyAddress': '387 HUMBLE CIR',\n\t# # 'PrimaryAddress': 'Yes',\n\t# # 'PropertyStreetNum': '387',\n\t# # 'PropertyNumSuf': '',\n\t# # 'PropertyPreDir': '',\n\t# # 'PropertyStreetNm': 'HUMBLE',\n\t# # 
'PropertyStreetTyp': 'CIR',\n\t# # 'PropertySuffixDir': '',\n\t# # 'PropertyUnitTyp': '',\n\t# # 'PropertyUnitNum': '',\n\t# # 'PropertyZipCode': '',\n\t# # 'PropertyZipExt': '',\n\t# # 'PropertyZipMuni': None,\n\t# # 'BillingStreetAddress': 'STE 101A 161 HORIZON DR',\n\t# # 'BillingCtyStZip': 'VERONA WI 53593',\n\t# # 'BillingCity': 'VERONA',\n\t# # 'BillingState': 'WI',\n\t# # 'BillingZip': '53593',\n\t# # 'Municipality': 'Village of Oregon',\n\t# # 'MunicipalityCode': '165',\n\t# # 'MunicipalityFIPS': '60200',\n\t# # 'MunicipalitySort': 'Oregon, Village of',\n\t# # 'TOWNSHIP': '05',\n\t# # 'TOWNSHIPDIRECTION': 'N',\n\t# # 'RANGE': '09',\n\t# # 'RANGEDIRECTION': 'E',\n\t# # 'SECTION': '03',\n\t# # 'QUARTER160': 'SE',\n\t# # 'Block': '',\n\t# # 'Lot': '115',\n\t# # 'LotType': 'LOT',\n\t# # 'PlatCode': '091430',\n\t# # 'PlatDescription': 'HIGHLANDS OF NETHERWOOD',\n\t# # 'SDStateCode': '4144',\n\t# # 'SchoolDistrict': 'OREGON SCHOOL DIST',\n\t# # 'DDStateCode': None,\n\t# # 'DrainageDistrict': None,\n\t# # 'LegalDescription': 'HIGHLANDS OF NETHERWOOD LOT 115 (0.359 A)\\r\\n',\n\t# # 'Assessed_Acres': 0.359,\n\t# # 'Sum_LandValue': 96400,\n\t# # 'Sum_ImprovementValue': 0,\n\t# # 'Shape.STArea()': 15647.040710449219,\n\t# # 'Shape.STLength()': 489.9580539730449}","repo_name":"chris-carbonell/culdesac-map","sub_path":"python/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"72085830540","text":"import glob\nimport os\n\nBASE=\"/mnt/mouse/data/nnmouse\"\n\nimagesTr=f\"{BASE}/raw/Dataset001_mouse/imagesTr\"\nlabelsTr=f\"{BASE}/raw/Dataset001_mouse/labelsTr\"\nresultsPath=f\"{BASE}/results/Dataset001_mouse/nnUNetTrainer__nnUNetPlans__3d_fullres/fold_0/validation\"\n\nresultIndex = 0\nlabelNode = None\nresultNode = None\nvolumeNode = None\nresultPaths = glob.glob(f\"{resultsPath}/*.nii.gz\")\n\ndef load():\n global resultIndex, labelNode, volumeNode, resultNode, resultPaths\n for node in [labelNode, volumeNode, resultNode]:\n if node:\n slicer.mrmlScene.RemoveNode(node)\n resultPath = resultPaths[resultIndex]\n resultIndex += 1\n print(resultPath)\n resultNode = slicer.util.loadSegmentation(resultPath)\n labelFileName = resultPath.split(\"/\")[-1]\n labelNode = slicer.util.loadSegmentation(f\"{labelsTr}/{labelFileName}\")\n subjectID = labelFileName[:-1*len(\".nii.gz\")]\n volumeFileName = f\"{os.path.split(resultPath)[-1].split('.')[0]}_0000.nrrd\"\n volumeNode = slicer.util.loadVolume(f\"{imagesTr}/{subjectID}_0000.nii.gz\", properties={\"singleFile\": True})\n #slicer.modules.volumes.logic().ApplyVolumeDisplayPreset(volumeNode.GetVolumeDisplayNode(), \"CT_ABDOMEN\")\n resultNode.GetDisplayNode().SetAllSegmentsOpacity2DFill(1.0)\n resultNode.GetDisplayNode().SetAllSegmentsOpacity2DOutline(0.0)\n labelNode.GetDisplayNode().SetAllSegmentsOpacity2DFill(0.0)\n labelNode.GetDisplayNode().SetAllSegmentsOpacity2DOutline(1.0)\n labelNode.GetDisplayNode().SetSliceIntersectionThickness(3)\n\nbutton = qt.QPushButton(\"Next\")\nbutton.connect(\"clicked()\", load)\nbutton.show()\n\nload()\n","repo_name":"pieper/nnmouse","sub_path":"mouse-review.py","file_name":"mouse-review.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"70958772941","text":"from lib import *\n\ndef getHomography(kpsA, kpsB, matches, thresh):\n if len(matches) > 4:\n src_pts = np.float32([ 
kpsA[m.queryIdx].pt for m in matches]).reshape(-1,1,2)\n dst_pts = np.float32([ kpsB[m.trainIdx].pt for m in matches]).reshape(-1,1,2)\n # estimate the homography between the sets of points\n (M, mask) = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, thresh)\n mask = mask.ravel().tolist()\n return (M, mask)\n else:\n return None\n","repo_name":"ceyxasm/FISB","sub_path":"FISB-Pipeline/homography.py","file_name":"homography.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"} +{"seq_id":"27949292136","text":"import nltk\n\ndef unusual_words(text):\n text_vocab = set(w.lower() for w in text if w.isalpha())\n english_vocab = set(w.lower() for w in nltk.corpus.words.words())\n unusual = text_vocab.difference(english_vocab)\n return sorted(unusual)\n\nprint(unusual_words(nltk.corpus.gutenberg.words('austen-sense.txt')))\nprint(unusual_words(nltk.corpus.nps_chat.words()))\n\n# 停用词\nfrom nltk.corpus import stopwords\nprint(stopwords.words('greek'))\n\n# 小练习\npuzzle_letters = nltk.FreqDist('egivrvonl')\nprint([w for w in puzzle_letters])\nobligatory = 'r'\nwordlist = nltk.corpus.words.words()\nprint([w for w in wordlist if len(w)>=6\n and obligatory in w\n and nltk.FreqDist(w) <= puzzle_letters])","repo_name":"bobosod/NLPwPython","sub_path":"pn_2_3.py","file_name":"pn_2_3.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"568382005","text":"import numpy as np\nfrom scipy.interpolate import interp1d\nfrom scipy.fft import fft\nfrom dune.fem.space import lagrange\nfrom ufl import *\n\n################################################################################\n\ndef get_gaussian_process(dim, mu, rho, h):\n \"\"\" Returns the points of an equidistant grid on [0,1]^dim and two\n realizations of a stationary Gaussian random field on this grid using a\n circulant embedding method.\n\n :param int dim: The dimension of the domain of the Gaussian field.\n :param float mu: The constant mean value of the stationary Gaussian\n field.\n :param rho: The function ρ with c(x1,x2) = ρ(x1-x2) for any points x1,x2\n where c denotes the covariance function of the stationary Gaussian\n field.\n :param float h: The grid width.\n :raises NotImplementedError: If dim != 1.\n :raises LinAlgError: If the circulant embedding matrix is not positive\n semidefinite.\n \"\"\"\n if dim != 1:\n raise NotImplementedError\n\n m = int(round(1./h) + 1)\n x = np.linspace(0., 1., m+1) # 1d grid\n\n # first column of covariance matrix and its circulant embedding matrix\n r = rho(x)\n s = np.concatenate((r, np.flip(r[1:-1])))\n\n s_hat = fft(s)\n\n if np.less(s_hat, 0.).any():\n raise np.linalg.LinAlgError(\n 'Circulant embedding matrix is not positive semidefinite!')\n\n eps = np.random.normal(size=2*m) + 1j * np.random.normal(size=2*m)\n e_hat = np.sqrt( 0.5 * s_hat / m ) * eps\n e = fft(e_hat)\n\n return x, mu + np.real(e[:m+1]), mu + np.imag(e[:m+1])\n\n################################################################################\n\ndef get_gaussian_aperture(dim, mu, rho, h, dmin=1e-6, file=None):\n \"\"\" Returns aperture functions d1, d2 on [0,1]^(dim-1) that define the\n geometry of a fracture. The aperture functions are created as linear\n spline interpolants from two realizations of a (dim-1)-dimensional\n stationary Gaussian random field on an equidistant grid. The fracture is\n required to have a positive minimum aperture dmin. 
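A usage sketch for `get_gaussian_process` above, under two stated assumptions: an exponential covariance ρ(h) = exp(−|h|/ℓ), and an illustrative correlation length ℓ = 0.1 (that kernel usually yields the nonnegative circulant embedding which the `LinAlgError` guard checks for):

import numpy as np

ell = 0.1  # assumed correlation length, not from the source

def rho(h):
    # Exponential covariance kernel, evaluated on the whole grid at once.
    return np.exp(-np.abs(h) / ell)

x, z1, z2 = get_gaussian_process(1, 0.0, rho, 1.0 / 256)
print(np.mean(z1), np.std(z1))  # sample mean near mu=0, std near sqrt(rho(0))=1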
In order to guarantee\n this, values of the discrete Gaussian field are substituted accordingly\n if necessary.\n\n :param int dim: The bulk dimension.\n :param float mu: The constant mean value of the stationary Gaussian\n field.\n :param rho: The function ρ with c(x1,x2) = ρ(x1-x2) for any points x1,x2\n where c denotes the covariance function of the stationary Gaussian\n field.\n :param float h: The grid width.\n :param float dmin: The minimum aperture of the fracture. Defaults to\n 1e-6.\n :param file: A filename. The value of the Gaussian aperture and the\n corresponding grid are saved to the file if a filename is provided.\n By default no such file is created.\n :raises NotImplementedError: If dim != 2.\n \"\"\"\n if dim != 2:\n raise NotImplementedError\n\n xh, z1, z2 = get_gaussian_process(dim - 1, mu, rho, h)\n\n overlap = np.where( (z1 + z2) <= dmin )\n\n if np.size(overlap) != 0:\n print(f'Warning: Substituting {np.size(overlap)} points with total ' \\\n 'aperture below dmin={dmin}.')\n correction = 0.5 * ( dmin - z1[overlap] - z2[overlap] )\n z1[overlap] += correction\n z2[overlap] += correction\n\n if file != None:\n np.savez(file, xh=xh, d1=z1, d2=z2)\n\n d1 = interp1d(xh, z1)\n d2 = interp1d(xh, z2)\n\n return d1, d2\n","repo_name":"maximilianhoerl/mmdgpy","sub_path":"mmdgpy/grids/random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":3557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"10062067125","text":"import re\nfrom collections import OrderedDict\n\nimport ruamel.yaml\n\nfrom lib.client import TadoClient\nfrom lib.logger import LOGGER\n\n\nclass DefaultScheduleBuilder:\n \"\"\"Grab the zones and construct a skeleton schedule.\"\"\"\n\n def __init__(self):\n \"\"\"Construct.\"\"\"\n self.client = TadoClient()\n self.default_path = \"conf/schedule-default.yaml\"\n\n LOGGER.info(\"Creating default schedules\")\n zones_data = self.client.zones()\n self.zone_names = list(map(lambda x: x[\"name\"], zones_data))\n\n self.zones = []\n for zone in self.zone_names:\n self.zones.append(self.make_defaults(zone))\n\n LOGGER.info(\"Default schedules now at %s\", self.default_path)\n\n def make_defaults(self, zone):\n \"\"\"Make up some default schedule data.\"\"\"\n return OrderedDict(\n {\n \"zone\": zone,\n \"schedule\": [\n {\"days\": \"all\", \"periods\": [{\"start\": \"07:00\", \"end\": \"23:00\"}]}\n ],\n }\n )\n\n def yamlise(self, path=None):\n \"\"\"Write the data out.\"\"\"\n yaml = ruamel.yaml.YAML()\n yaml.preserve_quotes = True\n with open(\"/tmp/schedule.yaml\", \"w\") as outfile:\n clean_zones = list(map(dict, self.zones))\n yaml.dump(clean_zones, outfile)\n\n content = open(\"/tmp/schedule.yaml\").read()\n lines = content.split(\"\\n\")\n newlines = []\n matcher = re.compile(r\"(.*)(\\d{2}:\\d{2})(.*)\")\n for line in lines:\n newlines.append(matcher.sub(r\"\\1'\\2'\\3\", line))\n\n if not path:\n path = self.default_path # nocov\n\n with open(path, \"w\") as schedule:\n for line in newlines:\n schedule.write(f\"{line}\\n\")\n","repo_name":"pikesley/potado","sub_path":"potado/lib/default_schedule_builder.py","file_name":"default_schedule_builder.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"39713692336","text":"\"\"\"\n\nJeu de Triminos\nPar Laura Brisoux et Thomas Desrumeaux\nCPG2A Promo 64 \n2020\n\n\"\"\"\n##Bibliothèques\nimport math\nfrom tkinter import *\nimport 
random\nimport tkinter.font as tkFont\n\n## Variables\n\nWidth = 600 #Largeur du plateau de jeu\nHeight = 600 #Hauteur du plateau de jeu\n\nnombre_ligne = 10 #Nombre de lignes horizontales dans le plateau de jeu\n\nHauteur_triangle = Height/nombre_ligne #Calcul de la hauteur d'un trimino\ncote_triangle = Hauteur_triangle*2/(3**0.5)\n\n\nWidth_canvas_selection = 120 #Largeur de l'affichage du trimino sélectionné\nHeight_canvas_selection = 120 #Hauteur de l'affichage du trimino sélectionné\n\ntrimino_selectionne = [[],0,0] #Définition du trimino sélectionné\n\n\nNombre_Joueurs = int(input(\"Nombre de joueurs :\")) \n\n\nPile = [] #Pioche\nPlateau = [] #Variable comportant les triminos posés sur le plateau\nTour = 0 #C'est le tour du joueur n. Si Tour = 0 alors le jeu n'est pas lancé \nTriminos_distribues = 5 \n\ncolor=['yellow','blue','green','red','orange','brown','purple','pink','black'] #Liste des couleurs possible sur un trimino\n\n\nfenetre = Tk() #Création de la fenêtre\nfenetre.resizable(False,False) #Fenêtre non modifiable en taille \nfenetre.title(\"Triminos Game\")\n\nLabelText = StringVar() #Texte d'information\n\nfont = tkFont.Font(size=12, weight='bold') #Police du texte affiché\n\n\n\ndef generer_pile(Nombre_triminos = Nombre_Joueurs*10): #Créée une liste comportant n triminos générés aléatoirement(C'est la pioche)\n Pile.clear()\n for i in range(Nombre_triminos):\n L=[]\n for j in range(3):\n L.append(color[random.randint(0,len(color)-1)])\n Pile.append(L)\n random.shuffle(Pile)\n\n\ndef Position_dans_repere(x,y): #Détermination position dans le repère non orthogonal\n Y = math.floor((Height-y)/Hauteur_triangle)\n X = math.floor((x-math.tan(math.pi/6)*(Height-y))/cote_triangle) \n return(X,Y)\n\ndef placer_trimino(event, x = 0,y = 0): #Placement du trimino séléctionné après un clic de souris\n global trimino_selectionne\n if (x,y) == (0,0): #Si c'est le joueur qui place un trimino\n if Tour == 0: #Si le joueur essaye de placer un trimino alors que la partie n'a paas commencé\n LabelText.set(Erreur.e)\n return\n x,y = event.x, event.y #Aquisition de la position de la souris\n if trimino_selectionne[0] == []: #Si le joueur n'a pas séléctionné de trimino\n LabelText.set(Erreur.c)\n return\n X,Y = Position_dans_repere(x,y) #Détermination de la position du trimino dans le repère\n for i in range (0,len(Plateau)): \n if (X,Y) == Plateau[i][1] and trimino_selectionne[1]%2 == Plateau[i][0][1]%2: #Vérification si la case choisie est libre\n LabelText.set(Erreur.a)\n return\n if verification_placement_trimino(trimino_selectionne,X,Y,Plateau) != True: #Vérification de la correspondance des couleurs\n LabelText.set(Erreur.d)\n return\n triangle1,triangle2 = position_centre_triangle(X,Y) #Calcul du centre des triangles de la case séléctionée dans le repère\n centre = distance_entre_deux_points(x,y,triangle1,triangle2) #Détermination du triangle souhaité entre les deux présents dans le repère\n else: #Placement du trimino de départ au centre du plateau de jeu\n trimino_selectionne = [Pile.pop(),0,0]\n X,Y = Position_dans_repere(x,y)\n triangle1,triangle2 = position_centre_triangle(X,Y)\n centre = (triangle1,True)\n if (centre[0][1] <= Height and centre[0][0] <= Width) and ((centre[1] == False and trimino_selectionne[1]%2 == 1) or (centre[1] == True and trimino_selectionne[1]%2 == 0)): #Vérification si le trimino ne sort pas du plateau et que le trimino est placé dans le bon sens\n dessiner_triminos(centre[0][0],centre[0][1],trimino_selectionne[0],trimino_selectionne[1],canvas,cote_triangle) 
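The cell lookup in `Position_dans_repere` above first takes the row index from the height, then removes the 30° shear `tan(π/6)·(Height−y)` before dividing by the triangle side. A self-contained restatement with the module's board constants (600×600 board, 10 rows), offered as a sketch:

import math

Height, nombre_ligne = 600, 10                      # board constants from above
Hauteur_triangle = Height / nombre_ligne            # row height
cote_triangle = Hauteur_triangle * 2 / (3 ** 0.5)   # triangle side length

def to_cell(x, y):
    # Same arithmetic as Position_dans_repere: row from the height, column
    # after undoing the grid's 30-degree shear.
    Y = math.floor((Height - y) / Hauteur_triangle)
    X = math.floor((x - math.tan(math.pi / 6) * (Height - y)) / cote_triangle)
    return X, Y

print(to_cell(300, 300))  # board centre -> (1, 5)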
#Affichage du trimino sur le plateau\n Plateau.append((trimino_selectionne,(X,Y))) #Ajout du trimino dans le plateau\n trimino_selectionne = [[],0] #Désélection du trimino\n piocher() #Le joueur picohe pour garder 5 triminos en main\n affciher_trimino_selectionne(trimino_selectionne)\n else:\n LabelText.set(Erreur.a)\n \ndef verification_placement_trimino(trimino,X,Y,Plateau): #Vérification de la correspondance des couleurs et si le trimino est bien placé à côté d'un autre \n A_cote_trimino = False\n Score = 0\n if trimino[1]%2 == 0: #Si le trimino est placé à l'endroit\n for i in range(0,len(Plateau)):\n if Plateau[i][0][1]%2 == 1: #On vérifie juste les triminos qui sont à l'envers\n if Plateau[i][1] == (X,Y): #Vérifiaction avec le trimino qui a la même coordonée dans le repère\n A_cote_trimino = True\n Score += 30\n if Plateau[i][0][0][(Plateau[i][0][1]-1)%3] != trimino[0][(trimino[1]-1)%3]:\n return \n if Plateau[i][1] == (X-1,Y): #Trimino à gauche\n A_cote_trimino = True\n Score += 30\n if Plateau[i][0][0][(Plateau[i][0][1])%3] != trimino[0][(trimino[1])%3]:\n return\n if Plateau[i][1] == (X,Y-1): #Trimino en dessous\n A_cote_trimino = True\n Score += 30\n if Plateau[i][0][0][(Plateau[i][0][1]+1)%3] != trimino[0][(trimino[1]+1)%3]:\n return\n \n else: #Si le trimino est placé à l'envers\n for i in range(0,len(Plateau)):\n if Plateau[i][0][1]%2 == 0:\n if Plateau[i][1] == (X,Y): #Vérifiaction avec le trimino qui a la même coordonée dans le repère\n A_cote_trimino = True\n Score += 30\n if Plateau[i][0][0][(Plateau[i][0][1]-1)%3] != trimino[0][(trimino[1]-1)%3]:\n return \n if Plateau[i][1] == (X+1,Y): #Trimino à droite\n A_cote_trimino = True\n Score += 30\n if Plateau[i][0][0][(Plateau[i][0][1])%3] != trimino[0][(trimino[1])%3]:\n return\n if Plateau[i][1] == (X,Y+1): #Trimino au dessus\n A_cote_trimino = True\n Score += 30\n if Plateau[i][0][0][(Plateau[i][0][1]+1)%3] != trimino[0][(trimino[1]+1)%3]:\n return\n Joueurs[trimino[2]-1].score += Score #Augmentation du score du joueur\n Joueurs[trimino[2]-1].Label.set(str(Joueurs[trimino[2]-1].name) + \" Score : \" + str(Joueurs[trimino[2]-1].score))#Actualisation de l'affichage du score\n return A_cote_trimino\n \n \ndef distance_entre_deux_points(sourisx,sourisy,point1,point2): #Détermination du triangle souhaité entre les deux présents dans le repère\n dist1=math.sqrt((point1[0]-sourisx)**2+(point1[1]-sourisy)**2)\n dist2=math.sqrt((point2[0]-sourisx)**2+(point2[1]-sourisy)**2)\n if dist1 Max_score[1]:\n Max_score = [Joueurs[i].name,Joueurs[i].score]\n LabelText.set(\"Victoire de : \" + Max_score[0])\n Tour = 0\n return\n Tour = (Tour+1)%(Nombre_Joueurs+1)\n if Tour == 0:\n Tour = 1\n LabelText.set(\"C'est au tour de \" + str(Joueurs[Tour-1].name) + \" de jouer\")\n actualiser_mains()\n\ndef commencer_partie(): #On commence une nouvelle partie en vidant la pioche,le plateau, les main, les scores et on redonne des triminos aux joueurs\n global Tour\n Tour = 0\n trimino_selectionne = [[],0,0]\n affciher_trimino_selectionne(trimino_selectionne)\n Plateau.clear()\n creer_plateau(nombre_ligne,Height,Width)\n generer_pile()\n distribution()\n placer_trimino(None, x=Width/2,y=Height/2)\n Tour = random.randint(1,Nombre_Joueurs)\n LabelText.set(\"C'est au tour de \" + str(Joueurs[Tour-1].name) + \" de jouer\")\n actualiser_mains()\n\ndef actualiser_mains(): #On actualise les mains des joueurs\n for i in range (0,Nombre_Joueurs):\n Joueurs[i].Actualiser_main()\n \ndef creer_plateau(ligne,hauteur,largeur): #Création des lignes du plateau de jeu\n 
canvas.create_rectangle(0,0,Width,Height, outline = \"white\", fill = \"white\")\n if largeur > hauteur:\n iteration = 2*int(largeur/cote_triangle)+1\n else:\n iteration = 2*int(hauteur/(Hauteur_triangle))+1\n#creer_repere\n # for i in range(0,iteration):\n # canvas.create_line(i*cote_triangle/2,hauteur-i*Hauteur_triangle,largeur,hauteur-i*Hauteur_triangle,fill = 'red',width = 5)\n # canvas.create_line(i*cote_triangle,hauteur,nombre_ligne*0.5*cote_triangle + i*cote_triangle,0,fill = 'red',width = 5)\n#creer_grille\n for i in range(0,ligne+1):\n canvas.create_line(0,i*hauteur/ligne,largeur,i*hauteur/ligne)\n for i in range(0,iteration):\n canvas.create_line(0,i*2*Hauteur_triangle - (nombre_ligne%2)*Hauteur_triangle,i*cote_triangle - (nombre_ligne%2)*cote_triangle/2,0)\n canvas.create_line(i*cote_triangle,hauteur,0,hauteur-i*2*Hauteur_triangle)\n canvas.create_rectangle(3,3,largeur,hauteur,width = 3) #contour plateau\n canvas_selection.create_rectangle(3,3,Width_canvas_selection,Height_canvas_selection,width = 3) #contour canvas_selection\n\n\ncanvas = Canvas(fenetre, width=Width, height=Height, bg=\"#ffffff\") #création du plateau de jeu\ncanvas.pack(side = \"left\")\nLabel(fenetre, text = \"Trimino séléctionné\", font = font).pack(side = \"top\") #Affichage de texte\ncanvas_selection = Canvas(fenetre, width=Width_canvas_selection, height=Height_canvas_selection, bg=\"#ffffff\")#création du tableau de sélection\ncanvas_selection.pack(side = \"top\")\n\n\nInformation = Label(fenetre,text = LabelText, textvariable = LabelText, font = font, foreground = \"red\").pack(side = \"top\") #Affichage de texte d'information\nButton(fenetre, text = \"Commencer une nouvelle partie\", command = commencer_partie).pack(side = \"bottom\",pady=5) #Création bouton\nButton(fenetre, text = \"Piocher\", command = piocher).pack(side = \"bottom\",pady=5) #Création bouton\n\n## Définition Joueurs \nclass Joueur:\n def __init__(self,numero, name): #determination des caractéristiques d'un joueur\n self.name = str(name)\n self.main = []\n self.numero = numero\n self.score = 0\n self.Label = StringVar()\n self.Label.set(str(self.name) + \" Score : \" + str(self.score)) #affichage du nom et du score du joueur\n self.canvas = Canvas(fenetre, width=cote_triangle*Triminos_distribues, height = Hauteur_triangle*1.2, bg=\"#ffffff\")\n self.name_text = Label(fenetre,textvariable = self.Label, font = font)\n self.canvas.bind(\"\n \n ''',\n menu = [['admin_group', load_lang('return')]]\n ))","repo_name":"openNAMU/openNAMU","sub_path":"route/give_delete_admin_group.py","file_name":"give_delete_admin_group.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"46"} +{"seq_id":"39095447013","text":"from collections import namedtuple\r\n\r\nResponder = namedtuple('Responder', ['name', 'preferences'])\r\n\r\ndef stable_marriage(proposers, responders):\r\n assignments = {}\r\n proposals = {responder.name: None for responder in responders}\r\n proposers_preferences = {proposer['name']: proposer['preferences'] for proposer in proposers}\r\n responders_preferences = {responder.name: responder.preferences for responder in responders}\r\n unassigned_proposers = list(proposers_preferences.keys())\r\n\r\n while unassigned_proposers:\r\n proposer = unassigned_proposers.pop(0)\r\n for responder in proposers_preferences[proposer]:\r\n if proposals[responder] is None:\r\n assignments[proposer] = responder\r\n proposals[responder] = proposer\r\n break\r\n elif 
responders_preferences[responder].index(proposer) < responders_preferences[responder].index(proposals[responder]):\r\n unassigned_proposers.append(proposals[responder])\r\n assignments[proposer] = responder\r\n proposals[responder] = proposer\r\n break\r\n\r\n return assignments\r\n\r\nproposers = [{'name': '1', 'preferences': ['A', 'B', 'C']},\r\n {'name': '2', 'preferences': ['A', 'B', 'C']},\r\n {'name': '3', 'preferences': ['B', 'C', 'A']},\r\n {'name': '4', 'preferences': ['C', 'B', 'A']},\r\n {'name': '5', 'preferences': ['B', 'C', 'A']}\r\n ]\r\n\r\nresponders = [Responder(name='A', preferences=['1', '2', '3', '4', '5']),\r\n Responder(name='B', preferences=['1', '2', '3', '4', '5']),\r\n Responder(name='C', preferences=['1', '2', '3', '4', '5']),\r\n ]\r\n\r\nprint(stable_marriage(proposers, responders))\r\n","repo_name":"imoken777/cs1","sub_path":"DA_arugrism.py","file_name":"DA_arugrism.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"74753869900","text":"\nimport logging\n\n# if using from tester.py uncoment that:\n# create logger that child of tester loger\nlogger = logging.getLogger('model_main.model_device')\n\n# if using directly uncoment that:\n'''\n# create logger\nlog_level = logging.INFO # logging.DEBUG\nlogging.basicConfig(level=log_level)\nlogger = logging.getLogger('model_main')\nlogger.setLevel(level=log_level)\n'''\n\n\nclass ModelDevice():\n\n def __init__(self, net):\n self.net = net\n\n # FOR Computation nodes\n def deleteCompnode(self, index):\n del self.net.compnodes[index]\n\n def deleteAllCompnodes(self):\n self.net.compnodes = []\n\n def getDeviceCount(self):\n return sum([node.cpuCount+node.gpuCount\n for node in self.net.compnodes])\n\n def getNodeCount(self):\n return len(self.net.compnodes)\n\n def getNodeSpec(self):\n '''\n returns parameter string to slurm:\n -w cnodex cnodey ...\n if no \"any\" node is present in json\n and empty string otherwise\n '''\n paramLine = \"-w \"\n \n if len(self.net.compnodes) == 1:\n node = self.net.compnodes[0]\n if node.name == \"any\":\n return \"\"\n else:\n paramLine += node.name\n else:\n for node in self.net.compnodes:\n paramLine += node.name + \",\"\n paramLine = paramLine[:-1]\n return(paramLine)\n # END FOR\n\n def getDeviceStateSize(self, nodeIdx, deviceType, deviceIdx):\n '''\n counts state sizes for every block that is scheduled\n to a given node and a given device\n '''\n devStateSize = 0\n for (blockIdx, block) in enumerate(self.net.blocks):\n mapping = self.net.mapping[blockIdx]\n if ((nodeIdx == mapping[\"NodeIdx\"])\n and (deviceType == mapping[\"DeviceType\"])\n and (deviceIdx == mapping[\"DeviceIdx\"])):\n cellCount = block.size.getCellCount(self.net.grid.gridStepX,\n self.net.grid.gridStepY,\n self.net.grid.gridStepZ)\n cellCountFull = cellCount[0]*cellCount[1]*cellCount[2]\n devStateSize += cellCountFull*self.net.base.getCellSize()\n return devStateSize\n \n def getMaxStatesCount(self):\n '''\n returns maximum number of states that can be\n stored in memory by ANY computing device\n result = min_{for every computing device D}\n (D.memory/sum_{for every block B inside D} (B.total) )\n '''\n minCapacity = 0\n for nodeIdx, node in enumerate(self.net.compnodes):\n for cpuIdx in range(node.cpuCount):\n memorySize = node.cpuMemory[cpuIdx]\n devStateSize = self.getDeviceStateSize(nodeIdx, \"cpu\", cpuIdx)\n if devStateSize > 0:\n capacity = int(memorySize * 1024 * 1024 * 1024/(8 * devStateSize))\n if 
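`stable_marriage` above is the proposer-side Gale–Shapley procedure, so the matching it returns should admit no blocking pair. A small hedged checker (the helper and its names are mine, not from the source; it inspects matched pairs, which suffices here because deferred acceptance leaves no responder preferring a proposer it already rejected):

def is_stable(assignments, proposers, responders):
    # Unstable iff some proposer p and responder r rank each other above
    # their assigned partners.
    prop_pref = {p["name"]: p["preferences"] for p in proposers}
    resp_pref = {r.name: r.preferences for r in responders}
    partner_of = {r: p for p, r in assignments.items()}
    for p, r in assignments.items():
        for better in prop_pref[p][: prop_pref[p].index(r)]:
            rp = resp_pref[better]
            current = partner_of.get(better)
            if current is None or rp.index(p) < rp.index(current):
                return False  # blocking pair found
    return True

print(is_stable(stable_marriage(proposers, responders), proposers, responders))
# -> True for the instance above ({'1': 'A', '2': 'B', '3': 'C'})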
(minCapacity == 0) or (capacity < minCapacity):\n minCapacity = capacity\n else:\n capacity = \"infinity\"\n print_args = (node.name, \"cpu\", cpuIdx,\n memorySize, devStateSize, capacity)\n logger.info((\"For node {} {}{} memory is {}GB,\"\n + \" total state size is {} elems,\"\n + \" capacity={}.\").format(*print_args))\n for gpuIdx in range(node.gpuCount):\n memorySize = node.gpuMemory[gpuIdx]\n devStateSize = self.getDeviceStateSize(nodeIdx, \"gpu\", gpuIdx)\n if devStateSize > 0:\n capacity = int(memorySize * 1024 * 1024 * 1024/(8 * devStateSize))\n if (minCapacity == 0) or (capacity < minCapacity):\n minCapacity = capacity\n else: \n capacity = \"infinity\"\n print_args = (node.name, \"gpu\", gpuIdx,\n memorySize, devStateSize, capacity)\n logger.info((\"For node {} {}{} memory is {}GB,\"\n + \" total state size is {} elems,\"\n + \" capacity={}.\").format(*print_args))\n \n return minCapacity # like 100*100*100 000 elements = 8GB < 64GB\n","repo_name":"dglyzin/tracer","sub_path":"envs/hs/model/model_device.py","file_name":"model_device.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"20757870347","text":"import pandas as pd\nimport numpy as np\nimport os\nimport json\nfrom sklearn.preprocessing import MinMaxScaler\nimport collections\nimport math\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pickle\nfrom scipy.integrate import quad\nimport scipy.stats\nimport numpy as np\n\n# Read and load parameters from user\ndef load_params():\n data = {}\n data['directory'] = input(\"Enter directory: \")\n data['window_length'] = int(input(\"Enter window length: \"))\n data['shift_length'] = int(input(\"Enter shift length: \"))\n data['resolution'] = int(input(\"Enter resolution: \"))\n # data['directory'] = \"Z\"\n # data['window_length'] = 3\n # data['shift_length'] = 2\n # data['resolution'] = 3\n return data\n\n# Load gesture files from the directory\ndef load_gestures(directory):\n complete_df = pd.DataFrame()\n for filename in os.listdir(directory):\n if filename.endswith(\".csv\"): \n df = pd.read_csv(directory + \"/\" + filename, header=None)\n sensor_id = list(range(1, len(df)+1))\n gesture_id = [filename[:-4]] * len(df)\n df['sensor_id'] = sensor_id\n df['gesture_id'] = gesture_id\n complete_df = pd.concat([complete_df, df])\n return complete_df\n\n# Create output directories if it does not exist\ndef create_output_directories():\n outdir = './Intermediate'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n# Evaluate normal distribution\ndef normal_distribution_function(x, mean=0, std=0.25):\n value = scipy.stats.norm.pdf(x, mean, std)\n return value\n\n# Get Gaussian band intervals\ndef get_intervals(r):\n # Normal Distribution\n total_intervals = 2*r\n x_min = -1\n x_max = 1\n\n interval_spacing = 2/float(2*r)\n x = np.linspace(x_min, x_max, 100)\n \n intervals = []\n count = 0\n start = -1\n while count < 2*r:\n res, err = quad(normal_distribution_function, -1, start)\n intervals.append(res)\n count += 1\n start += interval_spacing\n \n result = {}\n count = 1\n intervals.append(1)\n for i in range(0, len(intervals)):\n intervals[i] = -1 + 2*intervals[i]\n for i in range(0, len(intervals)-1):\n if i == len(intervals)-1:\n result[count] = [intervals[i], 1]\n continue\n result[count] = [intervals[i], intervals[i+1]-0.000000000000000001]\n count += 1\n return result\n\n# Get a dataframe containing minimum and maximum values for each sensor across\n# the 
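The capacity rule in `getMaxStatesCount` above reduces to capacity = ⌊mem_GB · 1024³ / (8 · state_elems)⌋ — how many float64 snapshots of a device's share of the state fit in its memory, with the minimum then taken over all devices that hold any state. The same arithmetic as a standalone sketch:

def state_capacity(memory_gb, state_elems, bytes_per_elem=8):
    # Number of full state copies a device can hold; None mirrors the
    # "infinity" log line for devices with no scheduled blocks.
    if state_elems == 0:
        return None
    return int(memory_gb * 1024 ** 3 / (bytes_per_elem * state_elems))

print(state_capacity(64, 100 * 100 * 100))  # 1e6 elems ~ 8 MB/state -> 8589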
entire dataset\ndef get_min_max_df(df):\n min_max_df = pd.DataFrame(columns = ['sensor_id', 'max', 'min'])\n sensor_ids = df.sensor_id.unique()\n for sensor_id in sensor_ids:\n sensor_df = df.loc[df['sensor_id'] == sensor_id]\n just_sensor_value_df = sensor_df.drop(['sensor_id', 'gesture_id'], axis=1)\n max_sensor_value = just_sensor_value_df.max(axis = 0).max()\n min_sensor_value = just_sensor_value_df.min(axis = 0).min()\n min_max_df = min_max_df.append({'sensor_id': str(sensor_id), 'max': max_sensor_value, 'min': min_sensor_value}, ignore_index=True)\n min_max_df = min_max_df.set_index(['sensor_id'])\n return min_max_df\n\n# Normalize each row\ndef normalize(row, min_max_df):\n sensor_id = row.sensor_id\n max_value = min_max_df.loc[[str(sensor_id)],['max']].values[0][0]\n min_value = min_max_df.loc[[str(sensor_id)],['min']].values[0][0]\n for idx in row.index:\n if idx == 'sensor_id' or idx == 'gesture_id' or pd.isnull(row[idx]):\n continue\n row[idx] = (row[idx] - min_value)/(max_value - min_value)\n row[idx] = row[idx]*2 + -1\n return row\n\n# Quantize each row\ndef quantize(row, interval_dict):\n sensor_id = row.sensor_id\n for idx in row.index:\n if idx == 'sensor_id' or idx == 'gesture_id' or pd.isnull(row[idx]):\n continue\n for key, value in interval_dict.items():\n if row[idx] >= value[0] and row[idx] <= value[1]:\n row[idx] = int(key)\n break\n return row\n\n# Generate word vectors by using the sliding window technique\ndef generate_word_vectors(row, word_vector_dict, window_length, shift_length):\n sensor_id = row.sensor_id\n gesture_id = row.gesture_id\n row = row.drop(labels=['sensor_id', 'gesture_id'])\n i=0\n while i < (len(row.index)-window_length):\n if pd.isnull(row[i]):\n break\n\n temp_key = str((int(gesture_id), sensor_id, i))\n k = i\n temp_list = []\n\n while k < (i + window_length):\n if pd.isnull(row[k]):\n break\n temp_list.append(int(row[k]))\n k += 1\n\n if len(temp_list) < window_length:\n break;\n\n word_vector_dict[temp_key] = tuple(temp_list)\n i += shift_length\n\n return row\n\n# Delete existing word files\ndef delete_all_word_files(directory):\n files_in_directory = os.listdir(directory)\n filtered_files = [file for file in files_in_directory if file.endswith(\".wrd\")]\n for file in filtered_files:\n path_to_file = os.path.join(directory, file)\n os.remove(path_to_file)\n\n# Write gesture word files\ndef write_word_files(directory, word_vector_dict, gesture_ids, sensor_ids):\n delete_all_word_files(directory)\n for key, value in word_vector_dict.items():\n key_tuple = eval(key)\n gesture_id = key_tuple[0]\n sensor_id = key_tuple[1]\n time = key_tuple[2]\n file_path = directory + \"/\" + str(gesture_id) + \".wrd\"\n file1 = open(file_path, \"a\") # append mode \n file1.write(key + \" -> \" + str(value) + \"\\n\")\n file1.close()\n\n# Serialize gesture word dictionary for future tasks\ndef serialize_gesture_word_dictionary(word_vector_dict):\n with open(\"Intermediate/gesture_word_dictionary.json\", \"w\") as write_file:\n json.dump(word_vector_dict, write_file)\n\n# Serialize data paramters for future tasks\ndef serialize_data_parameters(data):\n with open(\"Intermediate/data_parameters.json\", \"w\") as write_file:\n json.dump(data, write_file)\n\n# Generates gesture word dictionary\ndef generate_word_dictionary(data):\n interval_dict = get_intervals(data['resolution'])\n\n print(\"Loading gesture files...\")\n df = load_gestures(data['directory'])\n\n print(\"Normalizing values...\")\n min_max_df = get_min_max_df(df)\n 
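To illustrate the windowing scheme used by `generate_word_vectors` above: with window_length=3 and shift_length=2, a quantized series is cut into overlapping 3-symbol words whose start indices advance by 2. The series below is made up for the demonstration:

series = [1, 4, 2, 2, 5, 3, 6, 1]   # toy quantized sensor series
window_length, shift_length = 3, 2  # as prompted for in load_params()

words = {}
i = 0
while i <= len(series) - window_length:
    # Keys mirror the (gesture_id, sensor_id, time) convention, simplified
    # here to just the start index.
    words[i] = tuple(series[i:i + window_length])
    i += shift_length

print(words)  # {0: (1, 4, 2), 2: (2, 2, 5), 4: (5, 3, 6)}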
min_max_df.to_csv('Intermediate/min_max.csv')\n\n df = df.apply(lambda x: normalize(x, min_max_df), axis=1)\n df.to_csv('Intermediate/normalized.csv', index=False)\n\n print(\"Quantizing values...\")\n df = df.apply(lambda x: quantize(x, interval_dict), axis=1)\n df.to_csv('Intermediate/quantized.csv', index=False)\n gesture_ids = df.gesture_id.unique()\n sensor_ids = df.sensor_id.unique()\n\n print(\"Generating word files...\")\n word_vector_dict = {}\n df.apply(lambda x: generate_word_vectors(x, word_vector_dict, data['window_length'], data['shift_length']), axis = 1)\n\n print(\"Writing word files...\")\n write_word_files(data['directory'], word_vector_dict, gesture_ids, sensor_ids)\n\n print(\"Serializing objects needed for future tasks...\")\n serialize_gesture_word_dictionary(word_vector_dict)\n serialize_data_parameters(data)\n\n print(\"Task-1 complete!\")\n\ndef main():\n # Menu\n while(True):\n print(\"\\n******************** Task-1 **********************\")\n print(\" Enter 1 to generate word dictionary\")\n print(\" Enter 2 to exit\")\n option = input(\"Enter option: \")\n if option == '1':\n data = load_params()\n create_output_directories()\n generate_word_dictionary(data)\n else:\n break\n\nif __name__ == \"__main__\":\n main()","repo_name":"sandeeppsunny/Gesture-Recognition","sub_path":"task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":7656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"3085592630","text":"import re\nimport os\nimport glob\nimport sys\nfrom jinja2 import Template\n\ndef parse_sh_nve(filenames):\n\tr = (\".* +(?P\\d+) +.* +(?P((\\d+\\.\\d+\\.\\d+\\.\\d+)|(UnicastBGP))) +.*Up +.*L2 +.*\\[(?P\\d+)\\]\")\n\td = {}\n\tfor filename in filenames:\n\t\twith open(filename) as f:\n\t\t\tmatches = re.finditer(r, f.read())\n\t\t\tif (matches):\n\t\t\t\tfor match in matches:\n\t\t\t\t\tkey = match.group('l2vni')\n\t\t\t\t\tif key not in d:\n\t\t\t\t\t\tfill = {} \n\t\t\t\t\t\td[match.group('l2vni')] = fill\n\t\t\t\t\t\tfill['mcast_group'] = match.group('mcast')\n\t\t\t\t\t\tfill['vlan'] = match.group('vlan')\n\treturn d \n\n\ndef cfg_list(filenames, regex):\n\treturn [filename for filename in filenames if re.search(regex, filename) is not None] \n\ndef update_dict(*ds):\n\tres = ds[0].copy()\n\tfor i in range(len(ds)-1):\n\t\tres.update(ds[i+1])\n\treturn res\n\ndef find_additionals(d1,d2):\n\treturn {k:v for k,v in d2.items() if k not in d1}\n\ndef find_intersection(d1,d2):\n\treturn {k:v for k,v in d1.items() if k in d2}\n\ndef create_templates():\n\tvlan_template = \"\"\"\n\t{% for vni, data in target.items() -%}\n\tvlan {{ vni_database[vni].vlan }}\n\t\tvn-segment {{ vni }}\n\t{% endfor -%}\n\t\"\"\"\n\n\tevpn_template = \"\"\"\n\tevpn\n\t{% for vni, data in target.items() -%}\n\t\tvni {{ vni }} l2\n\t\t\trd auto\n\t\t\troute-target import {{ asnum }}:{{ vni }}\n\t\t\troute-target export {{ asnum }}:{{ vni }}\n\t{% endfor -%}\n\t\"\"\"\n\n\tnve_template_no_msir = \"\"\"\n\tinterface nve1\n\t{% for vni, data in target.items() %}\n\t\tmember vni {{ vni }}\n\t\t{% if 'UnicastBGP' == data.mcast_group -%}\n\t\t\tingress-replication protocol bgp\n\t\t{% else -%}\n\t\t\tmcast_group {{ vni_database[vni].mcast_group }}\n\t\t{% endif -%}\n\t{% endfor %}\n\t\"\"\"\n\n\tnve_template_msir = \"\"\"\n\tinterface nve1\n\t{% for vni, data in target.items() %}\n\t\tmember vni {{ vni }}\n\t\tmultisite ingress-replication\n\t\t{% if 'UnicastBGP' == data.mcast_group -%}\n\t\t\tingress-replication protocol 
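On the regex in `parse_sh_nve` above: the angle-bracketed group names appear to have been stripped from the record text (it reads `(?P\d+)`, which is not a valid pattern), while the code consumes `match.group('l2vni')`, `('mcast')` and `('vlan')`. A sketch with those names restored, run against a fabricated `show nve vni` line (the sample line is an assumption shaped to fit the pattern, not repository data):

import re

pattern = (r".* +(?P<l2vni>\d+) +.* +(?P<mcast>((\d+\.\d+\.\d+\.\d+)|"
           r"(UnicastBGP))) +.*Up +.*L2 +.*\[(?P<vlan>\d+)\]")

sample = "nve1  10010  239.1.1.10  Up  CP  L2 [110]"  # fabricated output line
m = re.search(pattern, sample)
if m:
    print(m.group("l2vni"), m.group("mcast"), m.group("vlan"))
    # -> 10010 239.1.1.10 110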
bgp\n\t\t{% else -%}\n\t\t\tmcast-group {{ vni_database[vni].mcast_group }}\n\t\t{% endif -%}\n\t{% endfor %}\n\t\"\"\"\n\n\tl3vni_associate = \"\"\"\n\tinterface nve1\n\t{% for vni, data in target.items() %}\n\t\tmember vni {{ vni }} associate-vrf\n\t{% endfor %}\n\t\"\"\"\n\n\tj2_vlan_template = Template(vlan_template)\n\tj2_evpn_template = Template(evpn_template)\n\tj2_nve_template_no_msir = Template(nve_template_no_msir)\n\tj2_nve_template_msir = Template(nve_template_msir)\n\tj2_l3vni_associate = Template(l3vni_associate)\n\n\treturn {'j2_vlan_template': j2_vlan_template,\n\t\t\t'j2_evpn_template': j2_evpn_template,\n\t\t\t'j2_l3vni_associate': j2_l3vni_associate,\n\t\t\t'j2_nve_template_no_msir': j2_nve_template_no_msir,\n\t\t\t'j2_nve_template_msir': j2_nve_template_msir\n\t\t\t}\n\n\ndef render_pre_works_configs_bg(templates, vni_database, target, asnum):\n\treturn [templates['j2_vlan_template'].render(vni_database=vni_database, target=target),\n\t\t\ttemplates['j2_evpn_template'].render(asnum=asnum, target=target),\n\t\t\ttemplates['j2_nve_template_msir'].render(vni_database=vni_database, target=target)]\n\ndef render_pre_works_configs_ag_l3vni_associate(templates, vni_database, target, asnum):\n\treturn [templates['j2_l3vni_associate'].render(vni_database=vni_database, target=target)]\n\ndef render_pre_works_configs_ag_l2vni_vlan_only(templates, vni_database, target, asnum):\n\treturn [templates['j2_vlan_template'].render(vni_database=vni_database, target=target),\n\t\t\ttemplates['j2_evpn_template'].render(asnum=asnum, target=target)]\n\ndef render_main_works_configs_ag_no_msir(templates, vni_database, target, asnum):\n\treturn [templates['j2_nve_template_no_msir'].render(vni_database=vni_database, target=target)]\n\ndef render_main_works_configs_ag_msir(templates, vni_database, target, asnum):\n\treturn [templates['j2_nve_template_msir'].render(vni_database=vni_database, target=target)]\n\ndef write_data_to_file(filename, path, data, postfix):\n\tif os.path.exists(path + \"/\" + filename + postfix):\n\t\tos.remove(path + \"/\" + filename + postfix)\n\n\twith open(path + \"/\" + filename + postfix, 'a') as f:\n\t\tf.write(data)\n\ndef main():\n\tpath = sys.argv[1]\n\tfilenames = glob.glob(path + '/*.*')\n\tsite_filenames = cfg_list(filenames, 'SKO-DATA-AC-014.*')\n\tmpod_filenames = cfg_list(filenames, 'SKO-DATA-BL-.*') + cfg_list(filenames, 'SKO-DATA-AC-MD.*')\n\tbg_filenames = cfg_list(filenames, 'SKO-DATA-BG-[MD1|MD2].*EXT.*')\n\n\tsite = parse_sh_nve(site_filenames)\n\tmpod = parse_sh_nve(mpod_filenames)\n\tbg = parse_sh_nve(bg_filenames)\n\n\tvni_database = update_dict(site, mpod, bg)\n\n\tsite_mpod = find_intersection(site, mpod)\n\tsite_bg = find_intersection(site, bg)\n\n\tag_bg_add = update_dict(site_mpod, site_bg)\n\tbg_add = find_additionals(bg,site_mpod)\n\n\ttemplates = create_templates()\n\n\tfor data in render_pre_works_configs_ag_l2vni_vlan_only(templates, vni_database, ag_bg_add, 65554):\n\t\twrite_data_to_file('pre-ag', path, data, '-l2vni')\n\n\tfor data in render_pre_works_configs_bg(templates, vni_database, ag_bg_add, 65554):\n\t\twrite_data_to_file('pre-bg', path, data, '-all')\n\n\tfor data in render_pre_works_configs_ag_l3vni_associate(templates, vni_database, ag_bg_add, 65554):\n\t\twrite_data_to_file('mw1-l3vni-ag-nve-associate', path, data, '-all')\n\n\tfor data in render_main_works_configs_ag_no_msir(templates, vni_database, ag_bg_add, 65554):\n\t\twrite_data_to_file('mw1-ag-without-MSIR', path, data, '')\n\n\tfor data in 
render_main_works_configs_ag_msir(templates, vni_database, ag_bg_add, 65554):\n\t\twrite_data_to_file('mw1-ag-with-MSIR', path, data, '')\n\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"zug202/pyeng","sub_path":"msite_optimal.py","file_name":"msite_optimal.py","file_ext":"py","file_size_in_byte":5340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"2451761380","text":"#from oct2py import Oct2Py as oc\n#import random\nimport numpy as np;\nfrom Spike_Isolation.Event_Classification.Yang_NSP.dtree_spikes_master.impurity import impurity;\n\n'''\ndata=np.array([[1,2],[3,4],[5,6],[7,8]])\nlabels=[1,2,1,2]\ncoeff=[1,2,.7]\n'''\ndef randomization(data=None,labels=None,coeff=None):\n\n labels = np.array(labels)\n Label_set = np.unique(labels)\n data = np.array(data)\n \n N=len(data);\n Ndim=len(data[0]);\n\n rvector=np.random.uniform(-1,1,Ndim+1);#(np.random.uniform(0,1,Ndim+1)-0.5)*2\n\n V = data*coeff[:Ndim];\n V = sum(V.T).T + coeff[-1];\n\n R = data*rvector[:Ndim];\n R = sum(R.T).T + rvector[-1];\n\n candidates = -V/R;\n\n candidates,idx = np.sort(candidates), np.argsort(candidates)\n labels_sort = labels[idx];\n\n impurity_alpha = np.ones(N)*np.Inf;#[];\n Pr1,Pr2 = np.ones(len(Label_set)),np.ones(len(Label_set));\n for i in range(1,N):\n for l,label in enumerate(Label_set): #??\n #for l in range(Ndim):\n #label = Label_set[l];\n\n #Low Speed\n# if sum(labels_sort[:i]==label): Pr1[l]=sum(labels_sort[:i]==label)/i;\n# else: Pr1[l]=1e-05;\n# if sum(labels_sort[i:]==label): Pr2[l]=sum(labels_sort[i:]==label)/(N-i);\n# else: Pr2[l]=1e-05;\n #High Speed\n if len(np.where(labels_sort[:i]==label)[0])!=0: Pr1[l]=len(np.where(labels_sort[:i]==label)[0])/i;\n else: Pr1[l]=1e-05;\n if len(np.where(labels_sort[i:]==label)[0])!=0: Pr2[l]=len(np.where(labels_sort[i:]==label)[0])/(N-i);\n else: Pr2[l]=1e-05;\n\n #impurity_alpha.append( -i*sum(Pr1*np.log2(Pr1)) -(N-i)*sum(Pr2*np.log2(Pr2)));\n impurity_alpha[i-1] = -i*sum(Pr1*np.log2(Pr1)) -(N-i)*sum(Pr2*np.log2(Pr2));\n impurity_alpha = np.array(impurity_alpha)/N;\n\n\n min_impurity_alpha,idx = min(impurity_alpha),np.argmin(impurity_alpha);\n alpha = (candidates[idx] + candidates[idx + 1]) / 2;\n coeff = coeff + rvector*alpha;\n impurity_plane = impurity(data,labels,coeff)[0];\n\n return coeff,impurity_plane;\n","repo_name":"yshaeri/Salient-Feature-Selection","sub_path":"Spike_Isolation/Event_Classification/Yang_NSP/dtree_spikes_master/randomization.py","file_name":"randomization.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"72934593420","text":"# encoding=utf-8\n\nimport datetime, json, itertools\n\nfrom urlparse import urlparse, parse_qs\n\nfrom flask import (request, flash, url_for, redirect, \n render_template, abort, session, jsonify, g)\n \nfrom .app import app\n\nfrom instagram import InstagramAPI\nfrom instagram.models import ApiModel\n# from .models import Person, Photo\n\nTAG_NAME = 'gummiognina'\nPAGE_COUNT = 5\n\ndef get_photos(max_tag_id=None):\n \n api = InstagramAPI(client_id=g.instagram_id, \n client_secret=g.instagram_secret, \n redirect_uri='http://www.gummiognina.com/')\n \n data, next_url = api.tag_recent_media(tag_name=TAG_NAME, \n count=PAGE_COUNT, \n max_tag_id=max_tag_id)\n \n def _to_dict(obj):\n d = dict()\n if not isinstance(obj, dict):\n obj = obj.__dict__\n for key, value in obj.iteritems():\n if isinstance(value, (ApiModel, dict)):\n d[key] = 
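The `impurity_alpha` update in `randomization` above is the size-weighted entropy of the two sides of each candidate cut, Σ_side n_side·H(side) / N. A hedged, compact restatement (function and variable names are mine):

import numpy as np

def split_entropy(labels_sorted, i):
    # Weighted entropy of cutting a sorted label array before index i.
    total = len(labels_sorted)
    out = 0.0
    for side in (labels_sorted[:i], labels_sorted[i:]):
        _, counts = np.unique(side, return_counts=True)
        p = counts / len(side)
        out += len(side) * -(p * np.log2(p)).sum()
    return out / total

labels = np.array([1, 1, 2, 1, 2, 2])
best = min(range(1, len(labels)), key=lambda i: split_entropy(labels, i))
print(best)  # -> 2, the purest cut for this toy sequence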
_to_dict(value)\n elif isinstance(value, list):\n d[key] = map(unicode, value)\n else:\n d[key] = unicode(value)\n return d\n \n def yield_images():\n for obj in data:\n yield _to_dict(obj)\n \n qs = urlparse(next_url).query\n max_tag_id = parse_qs(qs).get('max_tag_id', [None])[0]\n \n return (_to_dict(obj) for obj in data), max_tag_id\n \n\n@app.route('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\n@app.route('/photos', methods=['GET'])\n@app.route('/photos/page-', methods=['GET'])\ndef photos(page=None):\n photos, max_tag_id = get_photos(page)\n return jsonify(photos=list(photos), max_tag_id=max_tag_id)\n","repo_name":"jokull/gummiognina","sub_path":"gummiognina/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"35278592568","text":"from time import clock, time\nfrom random import random\n## This is to insert into a binary search tree by Python objects \n###\n####################### tree #########################\n# 6 3 1 4 5 2 6 #\n# 3 #\n# 1 4 #\n# 2 5 #\n######################################################\nCounter=0\nclass node:\n key=0\n left=0\n right=0\n\ndef insert(x) :\n global Counter\n p=root\n while p!=0 :\n Counter+=1\n q=p\n if x<=p.key:\n p=p.left\n else: p=p.right\n p=node()\n p.key=x; \n if x<=q.key: \n Counter+=1\n q.left=p # p inserted to the left of q\n else: q.right=p # p inserted to the right of q\n\ndef traverse(p):\n if p!=0:\n p1=p.left\n traverse(p1)\n t.append(p.key)\n p2=p.right\n traverse(p2)\n\nn=int(input('input n '))\nt=[]\nfor i in range(0,n): t=t+[random()]; \nn=len(t)\ntt=clock()\nx=t[0]\nroot=node(); node.key=x; node.right=0; node.right=0;\nfor i in range(1,n):x=t[i];insert(x)\ntraverse(root)\ntt=clock()-tt\nprint (\"CPU times: \", tt)\nprint ('Number of Comparesions: ', Counter)\nprint (\"Finished\")\n","repo_name":"iceman201/COSC262","sub_path":"COSC262-Binary.py","file_name":"COSC262-Binary.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"13081928157","text":"import pyuarm\r\nimport time\r\nimport numpy as np\r\n\r\noffset = 10.2\r\n\r\ndef toXY(xpixel, ypixel, xtotal, ytotal):\r\n\tx = float(xpixel)/xtotal*91.44\r\n\ty = float(ypixel)/ytotal*60.96\r\n\treturn(x,y)\r\n\r\ndef toPolar(x,y):\r\n\trad = np.sqrt((x-45.72)**2 + (y-offset)**2)\r\n\tang = np.arctan2(y-offset, x-45.72)*57.2958\r\n\treturn(rad, ang)\r\n\r\ndef toServos(rad,ang):\r\n\ts1 = ang\r\n\ts2 = -0.00855*(rad**3)+0.484*(rad**2)-9.832*rad+128.410\r\n\ts3 = -0.00738*(rad**3)+0.488*(rad**2)-13.232*rad+211.377\r\n\treturn(s1,s2,s3)\r\n\r\ndef armCoords(xTotal, yTotal, xpixel, ypixel, rotation):\r\n\tx,y = toXY(xpixel, ypixel, xTotal, yTotal)\r\n\trad, ang = toPolar(x,y)\r\n\ts1,s2,s3 = toServos(rad, ang)\r\n\ts4 = rotation\r\n\treturn(s1,s2,s3,s4)\r\n\r\ndef movArm(arm, angle0, angle1, angle2, angle3):\r\n\tarm.set_servo_angle(0, float(angle0))\r\n\tarm.set_servo_angle(1, float(angle1))\r\n\tarm.set_servo_angle(2, float(angle2))\r\n\tarm.set_servo_angle(3, float(angle3))\r\n\r\ndef main():\r\n\r\n\tarm = pyuarm.get_uarm()\r\n\r\n\twhile True:\r\n\t\tloc = input(\"Pickup: XTotal YTotal XPixel YPixel Rotation: \")\r\n\t\tc = loc.split(\" \")\r\n\t\ts1,s2,s3,s4 = armCoords(int(c[0]),int(c[1]),int(c[2]),int(c[3]),int(c[4]))\r\n\r\n\t\tloc = input(\"Drop: XTotal YTotal XPixel YPixel Rotation: \")\r\n\t\td = loc.split(\" 
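A small correctness note on the tree-timing script above: the root is initialised with `root=node(); node.key=x; node.right=0; node.right=0;`, which assigns to the *class* `node` rather than the `root` instance, sets `right` twice, and never touches `left`. It happens to run because the class defaults cover `left`/`right` and `insert` overwrites `p.key` on every new node, but the intended initialisation would be (sketch):

# Corrected root setup: assign to the instance, and set both children.
root = node()
root.key = x
root.left = 0
root.right = 0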
\")\r\n\t\ts5,s6,s7,s8 = armCoords(int(d[0]),int(d[1]),int(d[2]),int(d[3]),int(d[4]))\r\n\r\n\t\tif(s1>=s5):\r\n\t\t\tr = 135\r\n\t\tif(s1 x : print(\"a\")\nif b > x : print(\"b\")\nif c > x : print(\"c\")\nif d > x : print(\"d\")\n\n# Exercise 1 START\nstuff = dict()\n# print(stuff['candy']) # error\nprint(stuff.get('candy', -1)) # -1\n\nstuff.update({'item3': 65})\nstuff.update({'item5': 123})\nstuff.update({'item4': 32})\n\nprint(stuff)\nprint(stuff.keys())\nprint(stuff.values())\n\nfor x in stuff:\n print(x)\n# Exercise 1 END\n\n# Exercise 2 START\nfname = input(\"Enter file:\")\nif len(fname) < 1: fname = \"mbox-short.txt\"\nfh = open(fname)\n\nhours = []\nfor line in fh:\n if line.startswith('From '):\n words = line.split(\" \")\n hours.append(words[-2].split(\":\")[0])\n\ndic = {x: hours.count(x) for x in hours}\nprint(dic)\n\n# for k, v in dic.items(): print(k, v)\n\nfor key in sorted(dic.keys()):\n print(\"%s: %s\" % (key, dic[key]))\n# Exercise 2 END\n\n\n# Exercise 3 START\nfname = input(\"Enter file:\")\nif len(fname) < 1: fname = \"mbox-short.txt\"\nfh = open(fname)\n\naddress_list = []\n\nfor line in fh:\n if line.startswith('From '):\n words = line.split(\" \")\n address_list.append(words[1].strip())\n\nd = {x: address_list.count(x) for x in address_list}\nmax_key = max(d, key=lambda k: d[k])\nprint('%s %i' % (max_key, d[max_key]))\n\nmaximum = max(d, key=d.get)\nprint('%s %i' % (maximum, d[maximum]))\nprint(max(d.values()))\n\nmaxcount = max(d.values())\nfor k, v in d.items():\n if v == maxcount:\n print('%s %i' % (k, v))\n# Exercise 3 END\n","repo_name":"raunakshakya/PythonPractice","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"25298076337","text":"# Simple binary tree class, with NON-RECURSIVE operations\n\n# Needed for stack and queue.\nfrom collections import deque\n\n\nclass Node:\n def __init__(self, value, left=None, right=None):\n self.value = value\n self.left = left\n self.right = right\n\n def __repr__(self):\n return str(self.value)\n\n def to_list_depth_first(self):\n stack = deque()\n df_list = []\n stack.append(self)\n print(stack)\n while len(stack)>0:\n node = stack.pop()\n df_list.append(node.value)\n if node.right:\n stack.append(node.right)\n if node.left:\n stack.append(node.left)\n print(stack)\n return df_list\n\n def to_list_df_inorder(self):\n stack = deque()\n df_list = []\n current = self\n while True:\n if current is not None:\n stack.append(current)\n current = current.left\n elif stack:\n current = stack.pop()\n df_list.append(current.value)\n current = current.right\n else:\n break\n return df_list\n\n def to_list_df_postorder(self):\n s1 = deque()\n s2 = deque()\n df_list = []\n s1.append(self)\n while s1:\n node = s1.pop()\n s2.append(node)\n if node.left:\n s1.append(node.left)\n if node.right:\n s1.append(node.right)\n while s2:\n node = s2.pop()\n df_list.append(node.value)\n return df_list\n\n\ndef main() -> None:\n tree = Node(\n 1,\n Node(\n 2,\n Node(4),\n Node(5)\n ),\n Node(\n 3,\n Node(6),\n Node(7)\n )\n )\n print(tree)\n print(tree.to_list_depth_first())\n print(tree.to_list_df_inorder())\n print(tree.to_list_df_postorder())\n\n\nif __name__ == '__main__':\n 
main()","repo_name":"PKvasnick/Programovani-2","sub_path":"code/Ex8/binary_tree_3.py","file_name":"binary_tree_3.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"4652466148","text":"\"\"\"empty message\n\nRevision ID: 5d0b0496cdbc\nRevises: 6418f5264dfd\nCreate Date: 2017-10-01 22:56:57.416619\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5d0b0496cdbc'\ndown_revision = '6418f5264dfd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=120), nullable=False),\n sa.Column('first_name', sa.String(length=50), nullable=False),\n sa.Column('last_name', sa.String(length=50), nullable=False),\n sa.Column('email', sa.String(length=120), nullable=False),\n sa.Column('password', sa.String(length=120), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('username')\n )\n op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)\n op.add_column('bucketlists', sa.Column('belongs_to', sa.Integer(), nullable=True))\n op.alter_column('bucketlists', 'bucket_name',\n existing_type=sa.VARCHAR(length=240),\n nullable=False)\n op.create_unique_constraint(None, 'bucketlists', ['bucket_name'])\n op.create_foreign_key(None, 'bucketlists', 'users', ['belongs_to'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'bucketlists', type_='foreignkey')\n op.drop_constraint(None, 'bucketlists', type_='unique')\n op.alter_column('bucketlists', 'bucket_name',\n existing_type=sa.VARCHAR(length=240),\n nullable=True)\n op.drop_column('bucketlists', 'belongs_to')\n op.drop_index(op.f('ix_users_email'), table_name='users')\n op.drop_table('users')\n # ### end Alembic commands ###\n","repo_name":"muchai-mercy/bucketlist-backend","sub_path":"migrations/versions/5d0b0496cdbc_.py","file_name":"5d0b0496cdbc_.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"73938013559","text":"# _*_ coding: utf-8 _*_\r\n# @Time : 2019/10/15 2:56\r\n# @Author : Ole211\r\n# @Site : \r\n# @File : sync.py \r\n# @Software : PyCharm\r\n\r\n''' 程序实现异步的方法'''\r\n\r\nfrom threading import Thread\r\nfrom time import sleep\r\n\r\ndef asyn(f):\r\n def wrapper(*args, **kwargs):\r\n thr = Thread(target=f, args=args, kwargs=kwargs)\r\n thr.start()\r\n return wrapper\r\n\r\n@asyn\r\ndef task():\r\n global dist\r\n print('执行任务')\r\n for i in range(5):\r\n dist = i\r\n sleep(0.2)\r\n\r\n\r\ndef A():\r\n global dist\r\n print('----开始----')\r\n task()\r\n print(dist)\r\n print(\"函数A睡了十秒钟。。。。。。\")\r\n print(\"-----结束----\")\r\n\r\ndef B():\r\n n = 0\r\n while n<10:\r\n A()\r\n n = n+1\r\n sleep(1)\r\nB()\r\n\r\n\r\n\r\n'''异步函数实现'''\r\n# import time\r\n# import asyncio\r\n#\r\n# # 定义异步函数\r\n# async def hello():\r\n# # asyncio.sleep(1)\r\n# print('Hello World:%s' % time.time())\r\n#\r\n# def run():\r\n# for i in range(5):\r\n# loop.run_until_complete(hello())\r\n#\r\n# loop = asyncio.get_event_loop()\r\n# if __name__ =='__main__':\r\n# 
run()","repo_name":"atiger808/raspberry","sub_path":"smartBot/sync.py","file_name":"sync.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9907873420","text":"\"\"\" A program to do basic vector calculations in 3 dimensions: addition, dot product and normalization.\nAuthor: Afika Nyati\nDate: 20th April 2014\"\"\"\n\nfrom math import sqrt # Importing the square-root function from the math library. This way, I don't have to write math.sqrt() when square-rooting.\n\n\ndef main():\n \n A,B = new_vector(2) # Assigns the two vectors created by the new_vector function to A and B.\n print(\"A+B =\",vector_addition(A,B)) # Prints a vector addition done in the vector_addition function using vectors A and B.\n print(\"A.B =\", dot_product(A,B)) # Prints a dot product done in the dot_product function using vectors A and B.\n print(\"|A| =\", normalize(A)) # Prints a normalization done in the normalize function using vectors A and B.\n print(\"|B| =\", normalize(B)) # Prints a normalization done in the normalize function using vectors A and B.\n \n\ndef new_vector(number): # This is a multipurpose function that creates as many vectors as the user wants. The number parameter is the amount of vectors created.\n \n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\" # Assuming the user won't want more than 26 vectors, I have created a string of the alphabet and depending on how many vectors the user wants, creates a new vector named by a letter of the alphabet.\n \n return_values = [] # Initializes a list.\n \n for i in range(number): # A definite loop that creates as many vectors as requested by the user.\n Vector = str.split(input(\"Enter vector \" + alphabet[i] + \":\\n\")) # This statement assigns a list of three string numbers (the vector) to the Vector variable. Each number represents a dimension of the vector.\n \n for i in range(len(Vector)): # This nested deifinite loop coverts the string numbers in the Vector variable into integers so that they can undergo operations.\n Vector[i] = int(Vector[i]) # Changing each string number in the Vector varibale to an integer using the integer function.\n \n return_values.append(Vector) # Appends the the Vector list to the return_values list, thereby creating a list of lists (i.e. A list containing each vector called.\n \n return return_values # Return the list of vectors\n\n\ndef vector_addition(V1,V2):\n \n Addition = [ (V1[0]+V2[0]), (V1[1]+V2[1]), (V1[2]+V2[2])] # Adds the corresponding numbers of the dimesions of each vector together. This is then stored in a list called Addition.\n \n return Addition # Returns the Addition list which represents the vector addition.\n\n\ndef dot_product(V1,V2):\n \n Product = V1[0]*V2[0] + V1[1]*V2[1] + V1[2]*V2[2] # Multiples the corresponding numbers of the dimesions of each vector together. This is then stored in a list called Product.\n \n return Product # Returns the Product list which represents the vector addition.\n\n\ndef normalize(V): \n\n normal_vector = sqrt(V[0]**2 + V[1]**2 + V[2]**2) # Normalizes a vector . Makes use of the math.sqrt function. 
This is then stored in a list called normal_vector.\n \n return \"{0:0.2f}\".format(normal_vector) # Returns a formatted list that is rounded to two decimal places.\n\n\nmain() ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_6/nytafi001/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1918132409","text":"from torch.nn import CrossEntropyLoss, L1Loss\nimport torch\n\n\nclass ClassificationLoss():\n def __init__(self):\n self.loss_fn = CrossEntropyLoss()\n \n def __call__(self, logits, labels):\n return self.loss_fn(\n input=logits,\n target=labels\n )\n\n\nclass MMDLoss():\n def __init__(self):\n self.loss_fn = L1Loss()\n\n def __call__(self, susas, bpc):\n s_rps = torch.mean(susas, 0)\n b_rps = torch.mean(bpc, 0)\n return self.loss_fn(\n input=s_rps, \n target=b_rps\n )","repo_name":"wywy136/TSFSER","sub_path":"code/loss/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"7833272788","text":"# -*- coding: utf-8 -*-\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nBiGAN\n~~~~~\nAdversarially learned inference network\n\"\"\"\nimport torch\nfrom torch import nn\nfrom connectogen.modules.torch.mlp import MLPNetwork\nfrom connectogen.utils.torch.utils import eps, _listify\n\n\nclass BiGAN(object):\n \"\"\"An adversarially learned inference network (BiGAN).\n\n Attributes\n ----------\n discriminator: JointDiscriminator\n The BiGAN's discriminator network, which is presented a set of\n latent space-manifest space pairs and determines whether each\n pair was produced by the encoder or the generator.\n generator: MLPNetwork\n The BiGAN's generator network, which learns the underlying\n distribution of a dataset through a minimax game played against the\n discriminator.\n encoder: MLPNetwork\n The BiGAN's inferential network, which learns the latent space\n encodings of a dataset through a minimax game played against the\n discriminator.\n latent_dim: int\n Dimensionality of the latent space.\n \"\"\"\n def __init__(self,\n prior,\n hidden=(256, 256),\n bias=False,\n manifest_dim=4950,\n latent_dim=128):\n \"\"\"Initialise an adversarially learned inference network (BiGAN).\n\n Parameters are ordered according to the discriminator and encoder\n networks. For instance, the second hidden parameter denotes the \n number of units in the second hidden layer of D and E. The generator\n network currently uses an inverse architecture, so that the same\n parameter denotes the number of units in its second-to-last hidden\n layer.\n\n Parameters\n ----------\n prior: Distribution\n Distribution object with a `sample` method that takes as input\n matrix dimensions and produces as output samples from a\n distribution with the specified dimensions. 
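`MMDLoss` above is a first-moment matching penalty — the L1 distance between the batch means of two feature sets — rather than a full kernel MMD. A usage sketch with stand-in features (the shapes and the 0.5 shift are illustrative assumptions):

import torch

loss_fn = MMDLoss()               # class defined above
susas = torch.randn(32, 128)      # stand-in batch from one domain
bpc = torch.randn(16, 128) + 0.5  # second domain with a shifted mean

print(loss_fn(susas, bpc))        # roughly 0.5: the per-dimension mean gap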
Most torch\n Distributions can be used as priors.\n hidden: tuple\n Tuple denoting number of units in each hidden layer (other than\n the final/output layer).\n bias: bool or tuple\n Indicates whether each hidden layer includes bias terms for each\n unit.\n latent_dim: int\n Number of latent features that the generator network samples.\n manifest_dim: int\n Dimensionality of the observed (manifest) data.\n \"\"\"\n n_hidden = len(hidden)\n\n self.prior = prior\n self.latent_dim = latent_dim\n self.manifest_dim = manifest_dim\n self.discriminator = JointDiscriminator(\n hidden=hidden, bias=bias, manifest_dim=self.manifest_dim,\n latent_dim=self.latent_dim)\n self.encoder = MLPNetwork(\n hidden=hidden, bias=bias, in_dim=manifest_dim,\n out_dim=latent_dim, batch_norm=False, dropout=[0] * (n_hidden - 1) + [0.5] * 2)\n self.generator = MLPNetwork(\n hidden=hidden, bias=bias, in_dim=latent_dim,\n out_dim=manifest_dim, batch_norm=False, dropout=[0.5] * 2 + [0] * (n_hidden - 1),\n nonlinearity=['leaky'] * (n_hidden + 1)) #+ ['tanh'])\n\n def train(self):\n self.discriminator.train()\n self.generator.train()\n self.encoder.train()\n\n def eval(self):\n self.discriminator.eval()\n self.generator.eval()\n self.encoder.eval()\n\n def cuda(self):\n self.discriminator.cuda()\n self.generator.cuda()\n self.encoder.cuda()\n\n def zero_grad(self):\n self.discriminator.zero_grad()\n self.generator.zero_grad()\n self.encoder.zero_grad()\n\n def load_state_dict(self, params_g, params_e, params_d):\n self.encoder.load_state_dict(params_e)\n self.generator.load_state_dict(params_g)\n self.discriminator.load_state_dict(params_d)\n\n\nclass JointDiscriminator(nn.Module):\n \"\"\"A discriminator network that learns to identify whether a (latent,\n manifest) pair is drawn from the encoder or from the decoder.\n\n Attributes\n ----------\n x_discriminator: MLPNetwork\n Representational network for manifest-space data.\n z_discriminator: MLPNetwork\n Representational network for latent-space data.\n zx_discriminator: MLPNetwork\n Discriminator that splices together representations of latent- and\n manifest-space data and yields a decision regarding the provenance\n of the data pair.\n \"\"\"\n def __init__(self,\n manifest_dim=4950,\n latent_dim=128,\n hidden=(256, 256,),\n bias=False):\n \"\"\"Initialise a joint discriminator.\n\n Parameters\n ----------\n manifest_dim: int\n Side length of the input image.\n latent_dim: int\n Dimensionality of the latent space.\n hidden: tuple\n Tuple denoting the number of units in each hidden layer of\n the manifest-space representational network.\n bias: bool or tuple\n Indicates whether each hidden layer in the manifest representational\n network includes bias terms.\n \"\"\"\n super(JointDiscriminator, self).__init__()\n self.x_discriminator = MLPNetwork(\n hidden=hidden, bias=bias, in_dim=manifest_dim,\n out_dim=latent_dim*2, batch_norm=False, dropout=0.5)\n self.z_discriminator = MLPNetwork(\n hidden=(latent_dim*2, latent_dim*2), bias=True,\n in_dim=latent_dim, out_dim=latent_dim*2, batch_norm=False, dropout=0.5)\n self.zx_discriminator = MLPNetwork(\n hidden=(latent_dim*4,), bias=True, in_dim=latent_dim*4,\n out_dim=1, batch_norm=False)\n\n def forward(self, z, x):\n z = self.z_discriminator(z)\n x = self.x_discriminator(x)\n zx = torch.cat([z, x], 1) + eps\n zx = self.zx_discriminator(zx)\n return 
zx\n\n","repo_name":"rciric/deep-generative-connectome","sub_path":"connectogen/models/bigan/bigan.py","file_name":"bigan.py","file_ext":"py","file_size_in_byte":6054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40213499496","text":"from salome.shaper import model\nfrom ModelAPI import *\nfrom GeomAPI import *\n\nmodel.begin()\npartSet = model.moduleDocument()\nPart_1 = model.addPart(partSet)\nPart_1_doc = Part_1.document()\nExtrusion_1 = model.addExtrusion(Part_1_doc, [], model.selection(), 12, 0)\nSketch_1 = model.addSketch(Part_1_doc, model.defaultPlane(\"XOY\"))\nSketchCircle_1 = Sketch_1.addCircle(33.32502963835739, 19.24021483244179, 5)\nSketchConstraintRadius_1 = Sketch_1.setRadius(SketchCircle_1.results()[1], 5)\nSketchLine_1 = Sketch_1.addLine(0, 0, 33.32502963835739, 19.24021483244179)\nSketchLine_1.setAuxiliary(True)\nSketchProjection_1 = Sketch_1.addProjection(model.selection(\"VERTEX\", \"PartSet/Origin\"), False)\nSketchPoint_1 = SketchProjection_1.createdFeature()\nSketchConstraintCoincidence_1 = Sketch_1.setCoincident(SketchLine_1.startPoint(), SketchPoint_1.result())\nSketchConstraintCoincidence_2 = Sketch_1.setCoincident(SketchCircle_1.center(), SketchLine_1.endPoint())\nSketchProjection_2 = Sketch_1.addProjection(model.selection(\"EDGE\", \"PartSet/OX\"), False)\nSketchLine_2 = SketchProjection_2.createdFeature()\nSketchConstraintAngle_1 = Sketch_1.setAngle(SketchLine_2.result(), SketchLine_1.result(), 30)\nExtrusion_1.setNestedSketch(Sketch_1)\nGroup_1 = model.addGroup(Part_1_doc, \"Faces\", [model.selection(\"FACE\", \"Extrusion_1_1/To_Face\")])\nAngularCopy_1 = model.addMultiRotation(Part_1_doc, [model.selection(\"SOLID\", \"Extrusion_1_1\")], model.selection(\"EDGE\", \"PartSet/OZ\"), 12)\nmodel.do()\nPart_1_doc.moveFeature(Group_1.feature(), AngularCopy_1.feature(), True)\nmodel.end()\n\n# must be created 12 groups of faces, 12 results\nassert(Part_1_doc.size(\"Groups\") == 12)\n\nfor i in range(12):\n resShape = modelAPI_Result(Part_1_doc.object(\"Groups\", i)).shape()\n assert(not resShape.isNull())\n # the group result is a compund, check that this is a compound of one face\n aShapeExplorer = GeomAPI_ShapeExplorer(resShape, GeomAPI_Shape.FACE)\n assert(aShapeExplorer.more())\n assert(aShapeExplorer.current().isFace())\n aShapeExplorer.next()\n assert(not aShapeExplorer.more())\n\nassert(model.checkPythonDump())\n","repo_name":"x3-apptech/salome-modules-shaper","sub_path":"src/CollectionPlugin/Test/TestGroupMoveAndSplit1.py","file_name":"TestGroupMoveAndSplit1.py","file_ext":"py","file_size_in_byte":2086,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"5558124740","text":"import csv\r\nimport pandas\r\nimport plotly.express as px\r\n\r\nwith open('class2.csv', newline = '') as a:\r\n readData = csv.reader(a)\r\n newData = list(readData)\r\n\r\nnewData.pop(0)\r\n\r\ntotalMarks = 0\r\ntotalEntries = len(newData)\r\n\r\nfor marks in newData:\r\n totalMarks += float(marks[1])\r\n\r\nmean = totalMarks/totalEntries\r\n\r\nprint(\"Mean is \",mean)\r\n\r\ndf = pandas.read_csv('class2.csv')\r\nfig = px.scatter(df, x = 'Student Number', y = 'Marks')\r\n\r\nfig.show()","repo_name":"siddhantpallod/C105-Standard-Deviation","sub_path":"C105/class2.py","file_name":"class2.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"74682527161","text":"import 
pickle\r\nimport collections\r\nimport string\r\nimport re\r\nimport constants\r\n\r\ndef partitionDataset(dataset): \r\n\ttrainSet = []\r\n\ttestSet = []\r\n\r\n\tfor i in range(len(dataset)):\r\n\t\tif i % (len(dataset)/constants.CATEGORY_COUNT) >= (len(dataset)/constants.CATEGORY_COUNT) * constants.TESTSET_PROPORTION:\r\n\t\t\ttestSet.append(dataset[i])\r\n\t\telse:\r\n\t\t\ttrainSet.append(dataset[i])\r\n\r\n\treturn trainSet, testSet\r\n\r\n\r\ndef learnProbsMulti(trainSet):\r\n\r\n\twordCountByClass = []\r\n\tfor _ in range(constants.CATEGORY_COUNT): wordCountByClass.append(collections.defaultdict(float))\r\n\r\n\tfor entry in trainSet:\r\n\t\tentryList = entry[constants.COMMENT_INDEX].split()\r\n\t\tfor word in entryList:\r\n\t\t\tnormalizedWord = re.sub(r'[^\\w\\s]','',word.lower())\r\n\t\t\twordCountByClass[entry[-1]][normalizedWord] += 1\r\n\r\n\twordProbByClass = []\r\n\tfor _ in range(constants.CATEGORY_COUNT): wordProbByClass.append(collections.defaultdict(float))\r\n\r\n\tfor i in range(constants.CATEGORY_COUNT):\r\n\t\tfor key, val in wordCountByClass[i].iteritems():\r\n\t\t\twordProbByClass[i][key] = (val+constants.LAPLACE)/(len(trainSet)/constants.CATEGORY_COUNT + 2*constants.LAPLACE)\r\n\r\n\t# ignore the most common words with little semantic meaning \r\n\ttopWords = [\"the\", \"be\", \"to\", \"of\", \"and\", \"a\", \"in\", \"that\", \"have\", \"it\", \"for\", \"not\", \"on\", \"with\", \"he\", \"as\", \"you\", \"do\", \"at\", \\\r\n\t\t\"this\", \"but\", \"his\", \"by\", \"from\", \"they\", \"we\", \"say\", \"her\", \"she\", \"or\", \"will\", \"an\", \"my\", \"one\", \"all\", \"would\", \"there\", \"their\", \"what\"]\r\n\tfor i in range(constants.CATEGORY_COUNT):\r\n\t\tfor word in topWords:\r\n\t\t\twordProbByClass[i][word] = 1.0\r\n\r\n\treturn wordProbByClass\r\n\r\n\r\ndef classAndEvalMulti(testSet, wordProbByClass):\r\n\tcountByClass = [0 for _ in range(constants.CATEGORY_COUNT)]\r\n\tcorrectCountByClass = [0 for _ in range(constants.CATEGORY_COUNT)]\r\n\r\n\tfor i in range(len(testSet)):\r\n\t\tentry = testSet[i]\r\n\t\tweightByClass = [1.0 for _ in range(constants.CATEGORY_COUNT)]\r\n\r\n\t\tentryList = entry[constants.COMMENT_INDEX].split()\r\n\t\tfor word in entryList:\r\n\t\t\tfor i in range(constants.CATEGORY_COUNT):\r\n\t\t\t\tweightByClass[i] *= wordProbByClass[i][word] if wordProbByClass[i][word] != 0 else float(constants.LAPLACE)/(constants.TRAINSET_LEN/constants.CATEGORY_COUNT + 2*constants.LAPLACE)\r\n\r\n\t\tclassification = weightByClass.index(max(weightByClass))\r\n\r\n\t\tcountByClass[classification] += 1\r\n\r\n\t\tif entry[-1] == classification:\r\n\t\t\tcorrectCountByClass[classification] += 1\r\n\r\n\treturn countByClass, correctCountByClass\r\n","repo_name":"ericksiavichay/CS-221-Final-Project-","sub_path":"naive_bayes/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"3124747962","text":"# _*_ coding: utf-8 _*_\n# @Time : 2022/5/22 0:16\n# @Author : 李明昊 Richard Li\n# @Version:V 0.1\n# @File : config_operator.py\n# @desc : 配置文件操作\n\nimport configparser\nimport os\n\n# 配置文件位置\nCONFIG_PATH = 'config.conf'\n\n\ndef store_api_info(client_id: str, client_secret: str):\n \"\"\"\n 存储token值\n :param client_id: 客户端id\n :param client_secret: 客户端密钥\n :return:\n \"\"\"\n parser = configparser.ConfigParser()\n # baidu_api section\n parser.add_section('baidu_api')\n parser.set('baidu_api', 'client_id', client_id)\n 
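# --- Porting sketch for the naive-Bayes helper record above ---
# helper.py calls dict.iteritems(), which exists only in Python 2; under
# Python 3 the same Laplace-smoothed estimate uses .items(). A minimal
# sketch with hypothetical stand-ins for the constants module:
import collections

LAPLACE = 1.0
docs_per_class = 100  # stand-in for len(trainSet)/CATEGORY_COUNT

word_count = collections.defaultdict(float, {"good": 3.0, "bad": 1.0})
word_prob = {
    word: (count + LAPLACE) / (docs_per_class + 2 * LAPLACE)
    for word, count in word_count.items()  # .items() replaces .iteritems()
}
print(word_prob)  # {'good': 0.0392..., 'bad': 0.0196...}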
parser.set('baidu_api', 'client_secret', client_secret)\n with open(CONFIG_PATH, 'w') as f:\n parser.write(f)\n\n\ndef read_api_info():\n \"\"\"\n 读取配置信息\n :return: (client_id, client_secret)\n \"\"\"\n if os.path.exists(CONFIG_PATH):\n parser = configparser.ConfigParser()\n parser.read(CONFIG_PATH)\n return parser.get('baidu_api', 'client_id'), parser.get('baidu_api', 'client_secret')\n else:\n return None, None\n\n\ndef store_token(token: str):\n \"\"\"\n 存数获取到的token\n :return:\n \"\"\"\n with open('token.conf', 'w') as f:\n f.write(token)\n\n\ndef get_token():\n \"\"\"\n 从文件中读取储存的token\n :return:\n \"\"\"\n token = None\n if os.path.exists('token.conf'):\n with open('token.conf', 'r') as f:\n token = f.read()\n return token\n\nif __name__ == '__main__':\n store_api_info('WK1OMGhyqSWMOfDxKabUHeXY', 'TMFD2gPgzAgsOx4zAvQFNr5XsiUmL1Gn')\n","repo_name":"MoJeffrey/P60","sub_path":"code/config_operator.py","file_name":"config_operator.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"26735695808","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name = \"index\"),\n path(\"guru-login\", views.guruLogin, name = \"guru_login\"),\n path(\"guru-logout\", views.guruLogout, name = \"guru_logout\"),\n path(\"mapel\", views.mapel, name = \"mapel\"),\n path(\"mapel/hapus/\", views.hapusMapel, name = \"hapus_mapel\"),\n path(\"mapel/\", views.materi, name = \"materi\"),\n path(\"mapel//edit/\", views.editMateri, name = \"edit_materi\"),\n path(\"mapel//hapus/\", views.hapusMateri, name = \"hapus_materi\"),\n path(\"mapel//\", views.materiFileSiswa, name = \"materi_filesiswa\"),\n path(\"mapel///export_nilai\", views.exportNilaiSiswa, name = \"export_nilaisiswa\"),\n path(\"mapel///hapus/\", views.hapusFileSiswa, name = \"hapus_filesiswa\"),\n path(\"mapel///download/\", views.downloadFileSiswa, name = \"download_filesiswa\"),\n path(\"mapel///archive\", views.archiveMateri, name = \"archive_materi\"),\n path(\"mapel///download_archive\", views.downloadArchiveMateri, name = \"download_archive\"),\n path(\"mapel///hapus_archive\", views.deleteArchiveMateri, name = \"hapus_archive\"),\n path(\"pengaturan\", views.pengaturan, name = \"pengaturan\"),\n path(\"form-pengumpulan/\", views.formPengumpulan, name = \"form_pengumpulan\"),\n path(\"form-success\", views.terimaKasih, name = \"form_terimakasih\"),\n path(\"form-expired\", views.expired, name = \"form_expired\"),\n]","repo_name":"arrazy100/sigas-sukosari-2021","sub_path":"task_management/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"70723071161","text":"import cv2\nimport os\nimport time\nfrom ultralytics import YOLO\n\nmodel = YOLO(\"best.pt\")\n\n# Initialize metrics variables\ntp, fp, fn, total_frames, precision, recall, f1_score = 0, 0, 0, 0, 0, 0, 0\n\n# Initialize variables for calculating precision, recall, and F1 score\ntotal_frames = 0\nprecision = 0\nrecall = 0\nf1_score = 0\n\nvid = cv2.VideoCapture(\"shortvideo.mp4\")\n\n# Create output and frames directory if they don't exist\nfor dir_name in ['output', 'frames']:\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\n\n# VideoWriter setup\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nout_video = cv2.VideoWriter('output/output_video.avi', fourcc, 20.0, (1080, 720))\n\n\n# Frame directory 
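# --- Reconstruction sketch for the urls.py record above ---
# Empty path segments such as "mapel//edit/" suggest that angle-bracket
# converters (e.g. "<int:...>") were stripped when the file was scraped.
# A plausible reconstruction; the converter types and parameter names are
# guesses, not recovered from the source:
from django.urls import path
from . import views  # assumes the same app module as the record

urlpatterns = [
    path("mapel/<int:mapel_id>/", views.materi, name="materi"),
    path("mapel/<int:mapel_id>/edit/", views.editMateri, name="edit_materi"),
    path("mapel/<int:mapel_id>/hapus/", views.hapusMateri, name="hapus_materi"),
]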
setup\nframe_count = 0\ndef drawBoxes(frame, results):\n global tp, fp, total_frames\n\n\n boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)\n confi = results[0].boxes.conf.cpu().numpy()\n names = results[0].boxes.cls.cpu().numpy()\n\n for box, name, conf in zip(boxes, names, confi):\n cls = results[0].names[name]\n confidence = conf\n text = f\"{cls} {confidence:.2f}\"\n\n if confidence > 0.5 and cls == \"Player\":\n tp += 1 # For the purpose of this example, treating every detection as TP.\n\n color = (0, 0, 255)\n p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))\n cv2.rectangle(frame, p1, p2, color, 2)\n\n tf = 1\n w, h = cv2.getTextSize(text, 0, fontScale=tf / 3, thickness=1)[0]\n outside = p1[1] - h >= 3\n p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3\n cv2.rectangle(frame, p1, p2, color, -1, cv2.LINE_AA)\n cv2.putText(frame, text, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, tf / 3, (255, 255, 255), thickness=tf, lineType=cv2.LINE_AA)\n\n total_frames += 1\n return frame\n\n\nstart_time = time.time() \nwhile True:\n ret, frame = vid.read()\n if not ret or frame is None:\n print(\"Error reading frame or video ended.\")\n break\n\n try:\n height, width, _ = frame.shape\n if width < 1080 or height < 720:\n frame = cv2.resize(frame, (1080, 720))\n res = model.predict(frame, conf=0.3, imgsz=1088) # Adjusted to multiple of 32\n final_IMG = drawBoxes(frame, res)\n\n elapsed_time = time.time() - start_time\n current_fps = 1 / elapsed_time\n start_time = time.time()\n cv2.putText(final_IMG, f\"FPS: {current_fps:.2f}\", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)\n \n # Display the resulting frame in real time\n cv2.imshow('Result', final_IMG)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n\n # Save the frame as an image\n cv2.imwrite(os.path.join('frames', f'frame_{frame_count}.jpg'), final_IMG)\n frame_count += 1\n\n\n\n # Write the frame to the output video\n out_video.write(final_IMG)\n \n # Save the frame as an image\n cv2.imwrite(os.path.join('frames', f'frame_{frame_count}.jpg'), final_IMG)\n frames_dir = 'frames'\n\n\n except cv2.error as e: \n print(\"OpenCV Error:\", e)\n print(\"Shape of original frame:\", frame.shape)\n print(\"Shape of final_IMG:\", final_IMG.shape)\n except Exception as e: \n print(f\"Error occurred: {e}\")\n\n# After the while loop ends\nvid.release()\nout_video.release()\ncv2.destroyAllWindows()\n\n\n\n\n# Compute metrics\nif tp + fp > 0:\n precision = tp / (tp + fp)\nif tp + fn > 0:\n recall = tp / (tp + fn)\nif precision + recall > 0:\n f1_score = 2 * precision * recall / (precision + recall)\nfn = fp # As an example, equating FP to FN.\n\nprint(f\"Total number of frames tested: {total_frames}\")\nprint(f\"True positives: {tp}\")\nprint(f\"False positives: {fp}\")\nprint(f\"False negatives: {fn}\")\nprint(f\"Precision: {precision:.2%}\")\nprint(f\"Recall: {recall:.2%}\")\nprint(f\"F1 score: {f1_score:.2%}\")\n","repo_name":"tariqeee/SoccerPlayer","sub_path":"yolofinal.py","file_name":"yolofinal.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1037730224","text":"#!/usr/bin/python3\n\nimport re\nimport api\nimport txt\nimport time\nimport user\nimport log\nimport state\nimport trade\nimport broker\nimport feed\nimport gui\n\n\ntrade = trade.Trade()\nfeed = feed.Feed()\napi = api.API()\ntxt = txt.TXT()\n\n\n\nclass Strategy():\n\n\n def __init__(self):\n # 
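# --- Metrics-ordering sketch for the yolofinal.py record above ---
# In that script "fn = fp" is assigned only after recall and F1 have been
# computed, so recall always evaluates with fn == 0 (i.e. recall == 1.0
# whenever tp > 0). A sketch of the corrected ordering, keeping the
# record's own placeholder convention that FN is approximated by FP:
def summarize_detections(tp: int, fp: int) -> tuple:
    fn = fp  # placeholder assumption carried over from the record
    precision = tp / (tp + fp) if tp + fp > 0 else 0.0
    recall = tp / (tp + fn) if tp + fn > 0 else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall > 0 else 0.0)
    return precision, recall, f1


print(summarize_detections(tp=80, fp=20))  # (0.8, 0.8, 0.8)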
feed.initialize_databases('bittrex_tape.db', 'binance_tape.db')\n self.refresh_rate = 0.1\n self.mode = user.mode\n self.entry_lookback = user.entry_lookback\n self.exit_lookback = user.exit_lookback\n self.volume_spike_magnitude_coefficient = user.volume_spike_magnitude_coefficient \n self.price_spike_magnitude_coefficient = user.price_spike_magnitude_coefficient\n self.volume_drop_magnitude_coefficient = user.volume_drop_magnitude_coefficient \n self.price_drop_magnitude_coefficient = user.price_drop_magnitude_coefficient\n\n self.close_from_the_top_percentage = user.close_from_the_top_percentage\n\n self.all_coin_data = broker.get_data_for_all_coins('bittrex') # used to get all the symbols\n self.queue = 'free'\n self.can_trade = 'yes'\n\n self.btc_paper_balance = 1.0\n self.paper_trade_open_price = 0.0\n self.old_altcoin_price = 0\n self.new_altcoin_price = 0\n\n self.bought_units = 0\n\n self.last_traded_pair = ''\n\n self.all_time_high = 0\n\n # write out strategy settings for log:\n text = 'Strategy settings: ' + ' spike percent coefficient: ' + str(self.price_spike_magnitude_coefficient) + ' exit price drop coefficient: ' + str(self.price_drop_magnitude_coefficient) \n txt.write_next_line('paper_trading', text)\n\n\n\n\n def current_volume(self, broker, pair):\n last_volume = feed.get_last_record(broker, pair, 'volume', 0)\n return last_volume\n\n\n\n def current_price(self, broker, pair):\n last_price = feed.get_last_record(broker, pair, 'price', 0)\n return last_price\n\n\n\n def average_volume(self, broker, pair, number_of_data_points):\n total = 0\n for i in range(number_of_data_points):\n total += feed.get_last_record(broker, pair,'volume', i)\n average = total / number_of_data_points\n return average\n\n\n\n def average_price(self, broker, pair, number_of_data_points):\n total = 0\n for i in range(number_of_data_points):\n total += feed.get_last_record(broker, pair,'price', i)\n average = total / number_of_data_points\n return average\n\n\n\n def spike_in_price(self, broker, pair):\n if user.run_logs:\n print('\\nchecking for spike in price')\n print('self.current_price(broker, pair): ', self.current_price(broker, pair))\n print('self.average_price(broker, pair, self.entry_lookback): ', self.average_price(broker, pair, self.entry_lookback))\n print('self.price_spike_magnitude_coefficient): ', self.price_spike_magnitude_coefficient)\n print('if self.current_price(broker, pair) > (self.average_price(broker, pair, self.entry_lookback) * self.price_spike_magnitude_coefficient): ')\n if self.current_price(broker, pair) > (self.average_price(broker, pair, self.entry_lookback) * self.price_spike_magnitude_coefficient):\n if user.run_logs:\n print('True\\n')\n print('\\nSpike in price on: ', pair)\n return True\n else:\n if user.run_logs:\n print('False\\n')\n return False\n\n\n \n def spike_in_volume(self, broker, pair):\n print('checking for spike in volume')\n if self.current_volume(broker, pair) > (self.average_volume(broker, pair, self.entry_lookback) * self.volume_spike_magnitude_coefficient):\n return True\n else:\n return False\n\n\n\n def drop_in_price(self, broker, pair):\n if self.current_price(broker, pair) < (self.average_price(broker, pair, self.exit_lookback) * self.price_drop_magnitude_coefficient):\n return True\n else:\n return False\n\n\n\n def drop_in_volume(self, broker, pair):\n if self.current_volume() > (self.average_volume(broker, pair, self.exit_lookback) * self.volume_drop_magnitude_coefficient):\n return True\n else:\n return False \n\n\n\n def 
entry_conditions(self):\n print('\\nLooking for entry conditions:\\n')\n for i in self.all_coin_data:\n # print('i: ', i)\n # print('self.all_coin_data: ', self.all_coin_data)\n symbol = re.sub('-','',i['MarketName'])\n # print('symbol: ', symbol)\n # if self.spike_in_volume('bittrex', symbol) and self.spike_in_price('bittrex', symbol):\n \n if symbol[0] == 'B': # check for only bitcoin pairs\n # print('symbol: ', symbol)\n if self.spike_in_price('bittrex', symbol):\n print('\\nFound a pair which meets entry criteria')\n self.current_trade_pair = symbol\n return True\n # break # IMPORTANT -- break loop for now and go with the first one, later can filter more choices, perhaps by volume\n '''\n return True\n else:\n return False\n '''\n \n\n\n def get_current_trade_pair(self):\n return self.current_trade_pair\n \n\n\n\n def exit_conditions(self, coin):\n broker = 'bittrex'\n current_price = self.current_price(broker, coin)\n if current_price > self.all_time_high:\n self.all_time_high = current_price\n if current_price < self.all_time_high * self.close_from_the_top_percentage:\n return True\n else:\n return False\n '''\n if self.drop_in_price('bittrex', coin):\n return True\n else:\n return False\n '''\n \n\n '''\n def seek(self):\n log.general('Seeking trades...\\n')\n try:\n while True:\n time.sleep(self.refresh_rate)\n log.general('Seeking trades...\\n')\n # if self.can_trade is 'yes' and self.queue is 'free': # can_trade is an extra conditional for any sort of check to see if we can trade\n if self.queue is 'free':\n if self.entry_conditions():\n if trade.open('bittrex', self.current_trade_pair):\n self.queue = 'busy'\n if self.queue is 'busy':\n if self.exit_conditions(self.current_trade_pair):\n trade.close('bittrex', self.current_trade_pair) # get back into bitcoin\n self.queue = 'free' # reset the queue\n except KeyboardInterrupt:\n pass\n '''\n\n\n def trade(self):\n print('\\nQueue status: ', self.queue, '\\n')\n if self.queue is 'free':\n if self.entry_conditions():\n trade.open('bittrex', self.current_trade_pair)\n self.queue = 'busy'\n if self.queue is 'busy':\n if self.exit_conditions(self.current_trade_pair):\n trade.close('bittrex', self.current_trade_pair) # get back into bitcoin\n self.queue = 'free' # reset the queue\n\n\n\n def paper_trade(self):\n broker = 'bittrex'\n print('\\nQueue status: ', self.queue, '\\n')\n if self.queue is 'free':\n if self.entry_conditions():\n print('reached entry conditions')\n\n balance_to_write = 'BTC paper balance: ' + str(self.btc_paper_balance)\n txt.write_next_line('paper_trading', balance_to_write)\n\n self.last_traded_pair = self.current_trade_pair\n self.old_altcoin_price = self.current_price(broker, self.last_traded_pair)\n\n text_to_write = 'Open: ' + str(self.current_trade_pair) + ' at ' + str(self.old_altcoin_price)\n txt.write_next_line('paper_trading', text_to_write)\n\n self.bought_units = self.btc_paper_balance * self.old_altcoin_price\n bought_units_text_to_write = 'Bought ' + str(self.bought_units) + ' units of ' + str(self.last_traded_pair)\n \n \n self.queue = 'busy'\n if self.queue is 'busy':\n if self.exit_conditions(self.current_trade_pair):\n print('reached exit conditions')\n\n # trade.close('bittrex', self.current_trade_pair) # get back into bitcoin\n self.new_altcoin_price = self.current_price(broker, self.last_traded_pair)\n close_text_to_write = 'Close: ' + str(self.new_altcoin_price)\n txt.write_next_line('paper_trading', close_text_to_write)\n bitcoin_price = self.current_price('bittrex', 'USDTBTC')\n \n \n 
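# --- State-comparison sketch for the strategy.py record above ---
# The record gates trades with "self.queue is 'free'"; identity tests
# against string literals only work by accident of interning and raise a
# SyntaxWarning on Python 3.8+. Equality, or better an Enum, is robust.
# A minimal sketch of the same open/close gate:
from enum import Enum


class QueueState(Enum):
    FREE = "free"
    BUSY = "busy"


queue = QueueState.FREE
if queue == QueueState.FREE:  # '==' instead of 'is' on literals
    queue = QueueState.BUSY   # a trade was opened
if queue == QueueState.BUSY:
    queue = QueueState.FREE   # exit conditions met, trade closed
print(queue)                  # QueueState.FREE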
self.btc_paper_balance = self.bought_units * self.new_altcoin_price\n balance_text = 'New balance: ' + str(self.btc_paper_balance)\n txt.write_next_line('paper_trading', balance_text)\n # self.updated_balance = self.paper_trade_open_price # need to do the conversion in BTC\n self.queue = 'free' # reset the queue\n self.all_time_high = 0 # reset all time high because will be different for different coins\n\n\n\n\n def run(self):\n self.trade()\n\n\n\n\n\n\n'''\ns = Strategy()\nprint(s.entry_conditions())\nprint(print(s.get_current_trade_pair))\n# print(api.readable(s.all_coin_data))\n'''\n \n\n'''\ns = Strategy()\nprint(s.current_volume('bittrex', 'BTCETH'))\n'''\n\n\n\n\n\n","repo_name":"knightsUCF/WhaleHunter","sub_path":"strategy.py","file_name":"strategy.py","file_ext":"py","file_size_in_byte":9625,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"9899389160","text":"'''This program simulates a vending machine and calculate change based on the amount paid in $ and the change only given with the following:1c, 5c, 10c, 25c, $1\r\nKouame KOUASSI\r\n12 april 2014'''\r\n\r\ndef vending_machine():\r\n deposit = 0 \r\n #get the cost of the item(s) purchased\r\n cost = eval(input('Enter the cost (in cents):\\n'))\r\n #ask for money to pay when cost is greater than 0 and ask for more deposit when not enough\r\n while deposit < cost:\r\n deposit += eval(input('Deposit a coin or note (in cents):\\n'))\r\n change = deposit - cost\r\n #Give change when due\r\n if change > 0:\r\n print('Your change is:')\r\n \r\n for i in (100, 25, 10, 5, 1):\r\n #check decreasingly if one of the possible coin is part of the change\r\n if change >= i:\r\n #specify for change more than or equal to $1 as in dollar \r\n if i == 100:\r\n print(change//i, ' x ', '$1',sep = '')\r\n else:\r\n print(change//i, ' x ', i,'c',sep = '')\r\n change -= (change//i)*i\r\n #check until chanege is 0 then break loop\r\n if change == 0:\r\n break\r\n else:\r\n continue\r\n\r\ndef main():\r\n vending_machine()\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n ","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_5/ksskou001/question2.py","file_name":"question2.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"27589837976","text":"import argparse\nimport subprocess\n\nclass Colors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n\n force_disable = False\n\n @classmethod\n def disable(cls):\n cls.HEADER = ''\n cls.OKBLUE = ''\n cls.OKGREEN = ''\n cls.WARNING = ''\n cls.FAIL = ''\n cls.ENDC = ''\n\n @classmethod\n def enable(cls):\n if cls.force_disable:\n return\n\n cls.HEADER = '\\033[95m'\n cls.OKBLUE = '\\033[94m'\n cls.OKGREEN = '\\033[92m'\n cls.WARNING = '\\033[93m'\n cls.FAIL = '\\033[91m'\n cls.ENDC = '\\033[0m'\n\n\n\ndef git(*args, repository_path='.'):\n return subprocess.check_output([\"git\"] + list(args), cwd=repository_path,\n stderr=subprocess.STDOUT).decode()\n","repo_name":"parmarjh/Gst_Build","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22283759175","text":"import unittest\nfrom weighted_quick_union_uf import WeightedQuickUnionUF\n\n\nclass Percolation:\n def __init__(self, n):\n \"\"\"create 
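# --- Change-making sketch for the vending-machine record above ---
# The change loop is a greedy coin change: walk denominations in
# decreasing order and take as many of each as fit. Greedy is optimal for
# this canonical system (1, 5, 10, 25, 100 cents). A compact restatement:
def make_change(change: int) -> dict:
    coins = {}
    for denom in (100, 25, 10, 5, 1):
        if change >= denom:
            coins[denom] = change // denom
            change -= coins[denom] * denom
    return coins


print(make_change(187))  # {100: 1, 25: 3, 10: 1, 1: 2}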
n-by-n grid, with all sites blocked\n \"\"\"\n self.size = n\n self.bottom = self.size**2 + 1\n self.top = 0\n self.bottom = self.size**2 + 1\n self.opened = [[False for x in range(self.size)]\n for x in range(self.size)]\n self.qf = WeightedQuickUnionUF(self.size**2 + 2)\n\n def open(self, row, col): # None\n \"\"\"open site (row, col) if it is not open already\n \"\"\"\n self.opened[row - 1][col - 1] = True\n\n if (row == 1):\n self.qf.union(self.get_qf_index(row, col), self.top)\n elif (row == self.size):\n self.qf.union(self.get_qf_index(row, col), self.bottom)\n\n if row != 1 and self.is_open(row - 1, col):\n self.qf.union(\n self.get_qf_index(row, col), self.get_qf_index(row - 1, col))\n if row < self.size and self.is_open(row + 1, col):\n self.qf.union(\n self.get_qf_index(row, col), self.get_qf_index(row + 1, col))\n if col != 1 and self.is_open(row, col - 1):\n self.qf.union(\n self.get_qf_index(row, col), self.get_qf_index(row, col - 1))\n if col < self.size and self.is_open(row, col + 1):\n self.qf.union(\n self.get_qf_index(row, col), self.get_qf_index(row, col + 1))\n\n def get_qf_index(self, row, col):\n return self.size * (row - 1) + col\n\n def is_open(self, row, col): # bool\n \"\"\"Is site open\n \"\"\"\n return self.opened[row - 1][col - 1]\n\n def is_full(self, i, j):\n if (i > 0 and i <= self.size and j > 0 and j <= self.size):\n return self.qf.connected(self.top, self.get_qf_index(i, j))\n else:\n return False\n\n def percolates(self): # bool\n \"\"\"does the system percolate?\n \"\"\"\n return self.qf.connected(self.top, self.bottom)\n\n def validate(self, row, col):\n \"\"\"Validate that row/col is in the grid\n \"\"\"\n if row < 1 or row > self.size or col < 1 or col > self.size:\n raise ValueError('Out of bounds array access!')\n\n\nclass RandomValues:\n def __init__(self, n):\n values = list(range(1, n + 1))\n self.sample = []\n for y in values:\n self.sample += [[x, y] for x in values]\n\n\nclass PercolationTest(unittest.TestCase):\n def test_percolation(self):\n n = 5\n perc = Percolation(n)\n rv = RandomValues(n).sample\n print(rv)\n c = 0\n\n while (perc.percolates() is False):\n print(rv[c])\n perc.open(*rv[c])\n print(perc.percolates())\n c += 1\n print(perc.qf.array)\n\n print(\"Pecolates after \" + str(c) + \" attempts\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"aarboleda1/princeton_algos","sub_path":"princeton_algorithms/week_1/percolation.py","file_name":"percolation.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"20012788473","text":"\"\"\"\nDAO layer that encapsulates the logic and interaction with the database for worksheets\n\"\"\"\nfrom sqlalchemy import or_\nfrom sqlalchemy.orm import Query\n\nfrom dataall.core.environment.services.environment_resource_manager import EnvironmentResource\nfrom dataall.base.db import paginate\nfrom dataall.modules.worksheets.db.worksheet_models import Worksheet, WorksheetQueryResult\n\n\nclass WorksheetRepository(EnvironmentResource):\n \"\"\"DAO layer for worksheets\"\"\"\n _DEFAULT_PAGE = 1\n _DEFAULT_PAGE_SIZE = 10\n\n @staticmethod\n def count_resources(session, environment, group_uri) -> int:\n return (\n session.query(WorksheetQueryResult)\n .filter(\n WorksheetQueryResult.AwsAccountId == environment.AwsAccountId\n )\n .count()\n )\n\n @staticmethod\n def find_worksheet_by_uri(session, uri) -> Worksheet:\n return session.query(Worksheet).get(uri)\n\n @staticmethod\n def 
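# --- Trial-order sketch for the percolation record above ---
# RandomValues there enumerates sites in fixed row/column order without
# shuffling, so every "random" percolation trial opens sites
# deterministically. A Monte Carlo trial normally shuffles the open order:
import random

n = 5
sites = [(row, col) for row in range(1, n + 1) for col in range(1, n + 1)]
random.shuffle(sites)  # randomize which site is opened next
print(sites[:3])       # e.g. [(4, 2), (1, 5), (3, 3)]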
query_user_worksheets(session, username, groups, filter) -> Query:\n query = session.query(Worksheet).filter(\n or_(\n Worksheet.owner == username,\n Worksheet.SamlAdminGroupName.in_(groups),\n )\n )\n if filter and filter.get('term'):\n query = query.filter(\n or_(\n Worksheet.label.ilike('%' + filter.get('term') + '%'),\n Worksheet.description.ilike('%' + filter.get('term') + '%'),\n Worksheet.tags.contains(f\"{{{filter.get('term')}}}\"),\n )\n )\n return query\n\n @staticmethod\n def paginated_user_worksheets(\n session, username, groups, uri, data=None, check_perm=None\n ) -> dict:\n return paginate(\n query=WorksheetRepository.query_user_worksheets(session, username, groups, data),\n page=data.get('page', WorksheetRepository._DEFAULT_PAGE),\n page_size=data.get('pageSize', WorksheetRepository._DEFAULT_PAGE_SIZE),\n ).to_dict()\n","repo_name":"awslabs/aws-dataall","sub_path":"backend/dataall/modules/worksheets/db/worksheet_repositories.py","file_name":"worksheet_repositories.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":190,"dataset":"github-code","pt":"40"} +{"seq_id":"71464258041","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom __future__ import division\nimport numpy as np\nimport os\nimport ROOT\nimport math\nimport cmath\noutputdir = '/home/tmettler/Desktop/v08_00_00_33/V08_00_00_35/weighted_improved/'+'oscillation/' \n\n\n# In[2]:\n\n\ntry:\n os.stat(outputdir)\nexcept:\n os.mkdir(outputdir)\n\n\n# In[3]:\n\n\n# initialte ROOT default canvas\n#ROOT.gROOT.SetBatch(ROOT.kFALSE)\nROOT.gStyle.SetOptStat(0)\nROOT.gStyle.SetOptTitle(1)\nc1 = ROOT.TCanvas(\"c1\",\"c1\",1600,1200)\nc1.SetGrid(1)\nc1.SetLeftMargin(0.14)\nc1.SetRightMargin(0.1)\nc1.SetBottomMargin(0.14)\n\n\n# In[4]:\n\n\n# define imaginary number not as j\nz = 0 + 1j\n\n\n# In[5]:\n\n\n# calculates e^... 
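# --- Pagination sketch for the worksheet_repositories.py record above ---
# paginate() there wraps a SQLAlchemy Query with page/pageSize defaults of
# 1 and 10. A library-free sketch of the same offset/limit arithmetic,
# with hypothetical inputs:
def paginate_slice(items, page=1, page_size=10):
    start = (page - 1) * page_size  # page numbering starts at 1
    return items[start:start + page_size]


rows = list(range(25))               # stand-in for query results
print(paginate_slice(rows, page=3))  # [20, 21, 22, 23, 24]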
the time dependent propagation term\n# needs i,j,;ength,energy, returns the value\ndef getEij(i,j,L,E):\n M=0\n if(i==1 and j ==2):\n M=-M21\n if(i==2 and j ==1):\n M=M21\n if(i==3 and j ==2):\n M=M32\n if(i==2 and j ==3):\n M=-M32\n if(i==1 and j ==3):\n M=-M32-M21\n if(i==3 and j ==1):\n M=M32+M21\n this = cmath.exp(-z*M*L/E*2.534)\n #if M==0:\n # print 'Error!',i,j, this\n return this\n \n\n\n# In[6]:\n\n\n# calculates probabilities for a to b oscillation\n# needs a,b,length,energy,0/1 neutrino/antineutrino, returns probability\n# careful: contains U matrix\ndef getProb(a,b,L,E,anti):\n P = 0\n U = [[math.cos(t1)*math.cos(t3),math.sin(t1)*math.cos(t3),math.sin(t3)*cmath.exp(-z*d)], [-math.sin(t1)*math.cos(t2)-math.cos(t1)*math.sin(t2)*math.sin(t3)*cmath.exp(z*d),math.cos(t1)*math.cos(t2)-math.sin(t1)*math.sin(t2)*math.sin(t3)*cmath.exp(z*d),math.sin(t2)*math.cos(t3)], [math.sin(t1)*math.sin(t2)-math.cos(t1)*math.cos(t2)*math.sin(t3)*cmath.exp(z*d),-math.cos(t1)*math.sin(t2)-math.sin(t1)*math.cos(t2)*math.sin(t3)*cmath.exp(z*d),math.cos(t2)*math.cos(t3)]]\n if anti == 1:\n U = np.conj(U)\n for i in range(3):\n for j in range(3):\n P+=U[a][i]*np.conj(U[a][j])*np.conj(U[b][i])*U[b][j]*getEij(i+1,j+1,L,E)\n return P.real\n\n\n# In[7]:\n\n\n# generates three histograms with the probability for each flavour over length\n# a= start composition, length, energy, neutrino/antoneutrino\ndef fillProbL(a,L,E,anti=0):\n n_bins = 1000\n h_e = ROOT.TH1F('h_e','h_e',n_bins,0,L)\n h_u = ROOT.TH1F('h_u','h_u',n_bins,0,L)\n h_t = ROOT.TH1F('h_t','h_t',n_bins,0,L)\n h_e.SetTitle('black=e, blue=mu, red=tau')\n h_e.SetLineColor(ROOT.kBlack)\n titel = 'Neutrino energy = '+str(E)+' GeV, Length [km]'\n h_e.SetXTitle(titel)\n h_e.SetYTitle('Probability')\n h_e.SetMinimum(0)\n h_e.SetMaximum(1)\n h_u.SetLineColor(ROOT.kBlue)\n h_t.SetLineColor(ROOT.kRed)\n for i in range(n_bins):\n h_e.SetBinContent(i,getProb(a,0,i*L/n_bins,E,anti))\n h_u.SetBinContent(i,getProb(a,1,i*L/n_bins,E,anti))\n h_t.SetBinContent(i,getProb(a,2,i*L/n_bins,E,anti))\n return h_e,h_u,h_t\n\n# generates three histograms with the probability for each flavour over energy\n# a= start composition, length, energy, neutrino/antoneutrino\ndef fillProbE(a,L,E,anti):\n n_bins = 1000\n h_e = ROOT.TH1F('h_e','h_e',n_bins,0,E)\n h_u = ROOT.TH1F('h_u','h_u',n_bins,0,E)\n h_t = ROOT.TH1F('h_t','h_t',n_bins,0,E)\n h_e.SetTitle('black=e, blue=mu, red=tau')\n h_e.SetLineColor(ROOT.kBlack)\n titel = 'Length = '+str(L)+' km, Energy [GeV]'\n h_e.SetXTitle(titel)\n h_e.SetYTitle('Probability')\n h_e.SetMinimum(0)\n h_e.SetMaximum(1)\n h_u.SetLineColor(ROOT.kBlue)\n h_t.SetLineColor(ROOT.kRed)\n for i in range(n_bins):\n h_e.SetBinContent(i,getProb(a,0,L,(i+1)*E/n_bins,anti))\n h_u.SetBinContent(i,getProb(a,1,L,(i+1)*E/n_bins,anti))\n h_t.SetBinContent(i,getProb(a,2,L,(i+1)*E/n_bins,anti))\n return h_e,h_u,h_t\n \n\n\n# # Here starts the interactive part!\n# Give oscillation parameters and generate histograms for the oscillation probabilities...\n\n# In[11]:\n\n\n# define neutrino oscillation parameters\nt1 = 33.6*2*cmath.pi/360 # t_12\nt2 = 47.7*2*cmath.pi/360 # t_23\nt3 = 8.49*2*cmath.pi/360 # t_13\nM21 = 7.53e-5 # delta m21 mass difference squared\nM32 = 2.457e-3 # delta m32 mass difference squared\nd = cmath.pi/2 # delta CP phase\n\nE = 1 # energy of neutrino in GeV\nL = 10000 # maximal oscialltion length in km\na = fillProbL(0,L,E,0) # give [0-2]={e,u,t}, length, energy, [0,1]={neutrino,antineutrino}\nh_e = a[0] # electron probability\nh_u = a[1] # muon 
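# --- Cross-check sketch for the oscillation record above ---
# A common sanity check for getProb is the two-flavor vacuum survival
# probability P(mu->mu) = 1 - sin^2(2*theta23) * sin^2(1.267 * dm32 * L / E)
# with L in km, E in GeV and dm^2 in eV^2, which approximates the full
# three-flavor result at atmospheric baselines. A standalone sketch:
import math


def survival_mu(L_km, E_GeV, theta23=math.radians(47.7), dm32=2.457e-3):
    amplitude = math.sin(2 * theta23) ** 2
    phase = 1.267 * dm32 * L_km / E_GeV
    return 1 - amplitude * math.sin(phase) ** 2


print(survival_mu(L_km=500, E_GeV=1))  # ~0.01 near the oscillation maximum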
probability\nh_t = a[2] # tau probability\nh_e.Draw()\nh_u.Draw('same')\nh_t.Draw('same')\nc1.Draw()\nc1.SaveAs(outputdir+'Oscillation_length.png')\n\n\n# In[12]:\n\n\nE = 1\nL = 10000\na = fillProbE(0,L,E,0)\nh_e = a[0]\nh_u = a[1]\nh_t = a[2]\nh_e.Draw()\nh_u.Draw('same')\nh_t.Draw('same')\nc1.Draw()\nc1.SaveAs(outputdir+'Oscillation_energy.png')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"thomasmettler/ub_analysis_notebooks","sub_path":"Oscillation.py","file_name":"Oscillation.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"40580375210","text":"import json\nimport os\n\nSECRETS_JSON_PATH = os.path.join(os.path.dirname(__file__), \"secrets.json\")\nREFRESH_TOKEN_FIELD = \"zoom.refresh_token\"\nACCESS_TOKEN_FIELD = \"zoom.access_token\"\nEXPIRATION_TIME_FIELD = \"zoom.access_token_expiry_time\"\n\n\nclass SecretsStorage:\n \"\"\"This Class handles the fetching and storing of refresh token to and from the secrets storage.\"\"\"\n\n def __init__(self, config, logger) -> None:\n self.config = config\n self.logger = logger\n\n def get_secrets(self):\n \"\"\"The module returns a dictionary containing refresh token, access token,and expiration time\n of access token(UTC format) from the secrets storage.\n :returns secret_store_data: a dictionary containing refresh token, access token and expiration time\n of access token(UTC format) from the secrets storage.\n \"\"\"\n if os.path.exists(SECRETS_JSON_PATH) and os.path.getsize(SECRETS_JSON_PATH) > 0:\n with open(SECRETS_JSON_PATH, encoding=\"UTF-8\") as secrets_store:\n try:\n secrets = json.load(secrets_store)\n return secrets\n except ValueError as exception:\n self.logger.exception(\n f\"Error while parsing the secrets storage from path: {SECRETS_JSON_PATH}. 
Error: {exception}\"\n )\n\n def set_secrets(self, secrets):\n \"\"\"The module stores a dictionary containing refresh token, access token and expiration time\n of access token(UTC format) in to local secrets storage.\n :param secrets: a dictionary containing refresh token, access token and expiration time\n of access token(UTC format) to store in secrets storage.\n \"\"\"\n with open(SECRETS_JSON_PATH, \"w\", encoding=\"UTF-8\") as secrets_store:\n try:\n json.dump(secrets, secrets_store, indent=4)\n self.logger.info(\"Successfully saved the Refresh token in secrets\")\n except Exception as exception:\n self.logger.exception(\n f\"Error while updating the secrets storage.\\nError: {exception}\"\n )\n","repo_name":"elastic/enterprise-search-zoom-connector","sub_path":"ees_zoom/secrets_storage.py","file_name":"secrets_storage.py","file_ext":"py","file_size_in_byte":2156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"71877865720","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 28 15:32:50 2018\n\n@author: dannem\n\"\"\"\ndef foldMatDN(data,els='all',stim='all',domain='all',blocks='all',ilust=False):\n # channel * freq * id * block\n import numpy as np\n import matplotlib.pyplot as plt\n import seaborn as sns\n if 'all' in str(els):\n els=np.arange(0,data.shape[0])\n if 'all' in str(stim):\n stim=np.arange(0,data.shape[2])\n if 'all' in str(domain):\n domain=np.arange(0,data.shape[1])\n if 'all' in str(blocks):\n blocks=np.arange(0,data.shape[3])\n data=data[np.ix_(els,domain,stim,blocks)]\n labels=np.tile(np.arange(1,data.shape[2]+1),data.shape[3])\n data=data.reshape((data.shape[0]*data.shape[1],data.shape[2]*data.shape[3]),order='F')\n data=data.transpose()\n if ilust==True:\n fig = plt.gcf()\n fig.set_size_inches(20, 15)\n sns.heatmap(data,vmin=0, vmax=0.74)\n plt.show()\n return (data,labels)\n\n\ndef loadDataDN(fileName, folder='default',printSize=True):\n \"\"\"Written by Nemrodov Dan\n Imports data from the lwdata.mat structure\n Example: data=af.loadDataDN('S09_fft.mat')\n \"\"\"\n import platform\n import scipy.io\n import numpy as np\n if 'default' in folder:\n if 'nestor' in platform.uname()[1]:\n folder='/Users/dannem/Documents/DataAnalysis'\n elif 'Dell_DN' in platform.uname()[1]:\n folder='C:/Users/Dan/Documents/MATLAB'\n if '.mat' in fileName:\n fileName=fileName[:-4]\n mat=scipy.io.loadmat(folder+'/'+fileName+'.mat')\n if printSize:\n print(type(mat))\n print(mat.keys())\n print(np.shape(mat[fileName]))\n try:\n data = mat[fileName]\n except:\n data=mat\n del mat\n return data","repo_name":"dannem/PythonClass","sub_path":"auxfuns.py","file_name":"auxfuns.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"30582158933","text":"from tkinter import *\nimport Final_Login as fl\nimport sqlite3\nfrom tkinter.ttk import Treeview\nfrom tkcalendar import Calendar\nimport datetime as datee\nfrom tkinter import messagebox\n\n\nclass MenuUtama:\n def __init__(self, root):\n self.root = root\n root.title(\"Menu Utama Catering\")\n root.geometry(\"1020x600\")\n root.config(bg=\"lightblue\")\n root.iconbitmap(\"menu-ico.ico\")\n self.wrapper1 = LabelFrame(root, text=\"Form Pesanan Baru\")\n self.wrapper1.pack(fill=BOTH, padx=15, pady=(15, 0))\n self.wrapper2 = LabelFrame(\n root, text=\"Data Pesanan\", bg=\"lightblue\", fg=\"black\"\n )\n self.wrapper2.pack(fill=BOTH, expand=True, padx=15, 
pady=(0, 15))\n\n # Function\n\n # Mengdeklarasi paket sebagai variabel integer\n\n paket = IntVar()\n\n # Memanggil tanggal sekarang\n\n x = datee.datetime.now().strftime(f\"%d/%m/%y\")\n\n # Function untuk Memilih tanggal\n\n cal = Calendar(self.wrapper1, selectmode=\"day\")\n cal.grid(row=2, rowspan=7, column=3)\n\n # Mengambil data tanggal yang dipilih dari Calender\n\n def grad_date():\n dateTtl.config(text=\"Tanggal yang dipilih : \" + cal.get_date())\n\n # Melihat isi table dari database sqlite3\n\n def DatabaseView():\n trv.delete(*trv.get_children())\n con = sqlite3.connect(\"catering.db\")\n c = con.cursor()\n c.execute(\"SELECT * FROM datapesananbarubaru ORDER BY tgl_krm ASC\")\n fetch = c.fetchall()\n for data in fetch:\n trv.insert(\n \"\",\n \"end\",\n values=(\n data[0],\n data[1],\n data[2],\n data[3],\n data[4],\n data[5],\n data[6],\n data[7],\n ),\n )\n con.commit()\n con.close()\n\n # Tombol Form Pesanan\n\n nama = Label(self.wrapper1, text=\"Masukan Nama Anda \").grid(\n row=0, column=0, sticky=W\n )\n namaEntry = Entry(self.wrapper1, width=30)\n namaEntry.grid(row=0, column=1, sticky=W)\n paketLabel = Label(self.wrapper1, text=\"Pilih Paket Anda \").grid(\n row=1, column=0, sticky=W\n )\n myLabel = Label(self.wrapper1, text=\"\")\n myLabel.grid(row=3, column=0, sticky=W)\n r1 = Radiobutton(self.wrapper1, text=\"Pernikahan\", variable=paket, value=1)\n r1.grid(row=1, column=1, sticky=W)\n r2 = Radiobutton(self.wrapper1, text=\"Ulang Tahun\", variable=paket, value=2)\n r2.grid(row=2, column=1, sticky=W)\n pilihPaket = Button(\n self.wrapper1, text=\"Pilih Paket\", command=lambda: clicked(paket.get())\n )\n pilihPaket.grid(row=3, column=1, sticky=W)\n porsi = Label(self.wrapper1, text=\"Porsi (Min 150)\").grid(\n row=4, column=0, sticky=W\n )\n porsiEntry = Entry(self.wrapper1)\n porsiEntry.grid(row=4, column=1, sticky=W)\n alamat = Label(self.wrapper1, text=\"Alamat\").grid(row=5, column=0, sticky=W)\n alamatEntry = Entry(self.wrapper1, width=30)\n alamatEntry.grid(row=5, column=1, sticky=W)\n tanggal = Label(self.wrapper1, text=\"Tanggal Pemesanan\").grid(\n row=6, column=0, sticky=W\n )\n tanggalEntry = Label(self.wrapper1, text=x)\n tanggalEntry.grid(row=6, column=1, sticky=W)\n\n # Menampilkan paket yang dipilih dari radiobutton\n\n def clicked(value):\n if value == 1:\n myLabel.config(text=\"Pernikahan\")\n elif value == 2:\n myLabel.config(text=\"Ulang Tahun\")\n\n # Menghitung Total dari harga paket dikali jumlah porsi\n\n def hitungTotal():\n global hargaakhir\n try:\n if paket.get() == 1 and int(porsiEntry.get()) >= 150:\n porsi = int(porsiEntry.get())\n hargaakhir = porsi * 25000\n hargaCounter.config(text=\"Total Harga : \" + str(hargaakhir))\n elif paket.get() == 2 and int(porsiEntry.get()) >= 150:\n porsi = int(porsiEntry.get())\n hargaakhir = porsi * 30000\n hargaCounter.config(text=\"Total Harga : \" + str(hargaakhir))\n else:\n messagebox.showerror(\n \"ERROR\", \"Silahkan Pilih Paket dan Porsi diatas 150!\"\n )\n except:\n messagebox.showerror(\"ERROR\", \"Silahkan isi porsi!\")\n\n # Function menghitung kembalian dari total harga dikurangi cash\n\n def hitungKembalian():\n try:\n cash = int(inputCash.get())\n kembaliannya = cash - hargaakhir\n kembalianOutput.config(text=\"\" + str(kembaliannya))\n except:\n messagebox.showerror(\"ERROR\", \"Ada yang salah\")\n\n # Lanjutan form Pesanan\n\n hargaCounter = Label(self.wrapper1, text=\"Total Harga : \")\n hargaCounter.grid(row=7, column=0, sticky=W)\n inputCashHolder = Label(self.wrapper1, text=\"Cash 
:\").grid(\n row=7, column=1, sticky=W\n )\n inputCash = Entry(self.wrapper1, width=20)\n inputCash.grid(row=7, column=1, sticky=E)\n kembalianHolder = Button(\n self.wrapper1, text=\"Hitung Kembalian\", command=hitungKembalian\n ).grid(row=8, column=1, sticky=W)\n kembalianOutput = Label(self.wrapper1, text=\"\")\n kembalianOutput.grid(row=8, column=1, sticky=E)\n hargaButton = Button(\n self.wrapper1, text=\"Hitung Harga\", command=hitungTotal\n ).grid(row=8, column=0, sticky=W)\n noLabel = Label(self.wrapper1, text=\"NO Telepon :\").grid(\n row=9, column=0, sticky=W\n )\n noEntry = Entry(self.wrapper1)\n noEntry.grid(row=9, column=1, sticky=W)\n gapMaker = Label(self.wrapper1, text=\" \").grid(\n row=0, rowspan=10, column=2\n )\n ttglkirim = Label(\n self.wrapper1, text=\"Tanggal Kirim \", bg=\"blue\", fg=\"white\"\n ).grid(row=0, column=3)\n saveBuxtton = Button(\n self.wrapper1,\n text=\"Pilih Tanggal\",\n command=grad_date,\n bg=\"blue\",\n fg=\"white\",\n )\n saveBuxtton.grid(row=9, column=3, sticky=W)\n dateTtl = Label(self.wrapper1, text=\"Tanggal yang dipilih : \")\n dateTtl.grid(row=1, column=3)\n\n # Functions SQLITE\n\n # Function untuk memastikan form tidak kosong\n\n def validasi():\n return (\n namaEntry.get() != \"\"\n and paket.get() != \"\"\n and porsiEntry.get() != \"\"\n and alamatEntry.get() != \"\"\n and x != \"\"\n and cal.get_date() != \"\"\n and hargaakhir != \"\"\n and noEntry.get() != \"\"\n )\n\n # Function menambah data yang sudah di input ke dalam database sqlite3\n\n def tambahData():\n if validasi():\n try:\n trv.delete(*trv.get_children())\n con = sqlite3.connect(\"catering.db\")\n c = con.cursor()\n if paket.get() == 1:\n paket1 = \"Pernikahan\"\n elif paket.get() == 2:\n paket1 = \"Ulang Tahun\"\n c.execute(\n \"INSERT INTO datapesananbarubaru VALUES (:nama, :paket, :porsi, :alamat, :tglpesan, :tglkirim, :harga, :notelp)\",\n {\n \"nama\": namaEntry.get(),\n \"paket\": paket1,\n \"porsi\": porsiEntry.get(),\n \"alamat\": alamatEntry.get(),\n \"tglpesan\": x,\n \"tglkirim\": cal.get_date(),\n \"harga\": hargaakhir,\n \"notelp\": noEntry.get(),\n },\n )\n con.commit()\n c.execute(\"SELECT * FROM datapesananbarubaru\")\n fetch = c.fetchall()\n for data in fetch:\n trv.insert(\n \"\",\n \"end\",\n values=(\n data[0],\n data[1],\n data[2],\n data[3],\n data[4],\n data[5],\n data[6],\n data[7],\n ),\n )\n con.commit()\n con.close()\n resetForm()\n messagebox.showinfo(\"SUCCESS\", \"Data Berhasil Disimpan!\")\n except ValueError:\n messagebox.showerror(\"ERROR\", \"Silahkan Masukan data yang benar!\")\n else:\n messagebox.showerror(\"ERROR\", \"Silahkan Isi semua data!\")\n\n # Function mengupdate data yang sudah di pilih ke dalam database sqlite3\n\n def updateData():\n if validasi():\n try:\n trv.delete(*trv.get_children())\n con = sqlite3.connect(\"catering.db\")\n c = con.cursor()\n if paket.get() == 1:\n paket1 = \"Pernikahan\"\n elif paket.get() == 2:\n paket1 = \"Ulang Tahun\"\n c.execute(\n f\"UPDATE datapesananbarubaru SET nama = :nama, paket = :paket, porsi = :porsi, alamat = :alamat, tgl_psn = :tglpesan, tgl_krm = :tglkirim, total_harga = :harga, notlp = :notelp WHERE nama = '{dataNama.get()}'\",\n {\n \"nama\": namaEntry.get(),\n \"paket\": paket1,\n \"porsi\": porsiEntry.get(),\n \"alamat\": alamatEntry.get(),\n \"tglpesan\": x,\n \"tglkirim\": cal.get_date(),\n \"harga\": hargaakhir,\n \"notelp\": noEntry.get(),\n },\n )\n con.commit()\n c.execute(\"SELECT * FROM datapesananbarubaru\")\n fetch = c.fetchall()\n for data in fetch:\n trv.insert(\n 
\"\",\n \"end\",\n values=(\n data[0],\n data[1],\n data[2],\n data[3],\n data[4],\n data[5],\n data[6],\n data[7],\n ),\n )\n con.commit()\n con.close()\n resetForm()\n messagebox.showinfo(\"SUCCESS\", \"Data Berhasil Diupdate!\")\n except ValueError:\n messagebox.showerror(\"ERROR\", \"Silahkan Masukan data yang benar!\")\n\n else:\n messagebox.showerror(\"ERROR\", \"Silahkan Isi semua data!\")\n\n # Functions untuk memastikan data yang dipilih tidak kosong\n\n def validData():\n return len(dataNama.get()) != 0\n\n # Functions untuk menghapus data yang dipilih dari table\n\n def hapusData():\n if validData():\n try:\n con = sqlite3.connect(\"catering.db\")\n c = con.cursor()\n c.execute(\n f\"DELETE FROM datapesananbarubaru WHERE nama = '{dataNama.get()}'\"\n )\n con.commit()\n trv.delete(*trv.get_children())\n c.execute(\"SELECT * FROM datapesananbarubaru\")\n fetch = c.fetchall()\n for data in fetch:\n trv.insert(\n \"\",\n \"end\",\n values=(\n data[0],\n data[1],\n data[2],\n data[3],\n data[4],\n data[5],\n data[6],\n data[7],\n ),\n )\n con.commit()\n con.close()\n messagebox.showinfo(\"Success\", \"Data Berhasil Dihapus\")\n dataNama.delete(0, END)\n resetForm()\n except ValueError:\n messagebox.showerror(\"ERROR\", \"Isi data yang akan dihapus!\")\n else:\n messagebox.showerror(\"ERROR\", \"Isi Data yang akan dihapus!\")\n\n # Function untuk Mereset form pesanan agar kosong\n\n def resetForm():\n try:\n myLabel.config(text=\"\")\n namaEntry.delete(0, END)\n paket.set(None)\n porsiEntry.delete(0, END)\n alamatEntry.delete(0, END)\n hargaCounter.config(text=\"Total Harga :\")\n noEntry.delete(0, END)\n dateTtl.config(text=\"Tanggal yang dipilih : \")\n tanggalEntry.config(text=x)\n dataNama.delete(0, END)\n kembalianOutput.config(text=\"\")\n inputCash.delete(0, END)\n except:\n messagebox.showerror(\"ERROR\", \"Data Sudah Kosong!\")\n\n # Functin Exit MenuUtama\n\n def exitMenu():\n tglnya = datee.datetime.now().strftime(f\"%d/%m/%y - %X\")\n con = sqlite3.connect(\"catering.db\")\n c = con.cursor()\n c.execute(\"SELECT * FROM logadmin ORDER BY tglmasuk DESC\")\n fetchlog = c.fetchone()\n namanya = fetchlog[0]\n c.execute(\n f\"UPDATE logadmin SET tglkeluar = '{tglnya}' WHERE nama = '{namanya}'\"\n )\n con.commit()\n con.close()\n root.destroy()\n # Membuka window baru (Final_Login)\n rootLogin = Tk()\n fl.loginForm(rootLogin)\n rootLogin.mainloop()\n\n # Tombol Operasi SQLite\n\n exitButton = Button(\n self.wrapper2, text=\"Exit\", bg=\"red\", fg=\"white\", command=exitMenu\n ).pack(side=BOTTOM)\n resetButton = Button(\n self.wrapper1, text=\"Reset Form\", command=resetForm, bg=\"red\", fg=\"white\"\n ).grid(row=9, column=8)\n gapmaker2 = Label(self.wrapper1, text=\" \").grid(rowspan=7, column=4)\n labelHapus = Label(self.wrapper1, text=\"Data yang Terpilih adalah : \").grid(\n row=8, column=6\n )\n refreshButton = Button(\n self.wrapper1, text=\"Update Data\", bg=\"yellow\", command=updateData\n ).grid(row=9, column=3, sticky=E)\n hapusButton = Button(\n self.wrapper1, text=\"Hapus Data\", bg=\"red\", fg=\"white\", command=hapusData\n ).grid(row=9, column=7)\n dataNama = Entry(self.wrapper1)\n dataNama.grid(row=9, column=6)\n simpanButton = Button(\n self.wrapper1,\n text=\"Simpan Data\",\n bg=\"green\",\n fg=\"white\",\n command=tambahData,\n ).grid(row=9, column=3)\n updateButton = Button(\n self.wrapper2,\n text=\"View Table\",\n bg=\"green\",\n fg=\"white\",\n command=DatabaseView,\n ).pack()\n\n # Tampilan Menu Makanan\n\n menu1 = Label(\n self.wrapper1,\n text=\"PAKET 
PERNIKAHAN \\n 1. Ayam Gulai \\n 2. Sop Kambing \\n 3. Asinan \\n 4. AQUA \\n\\n Rp.25,000\",\n bg=\"lightblue\",\n ).grid(row=1, rowspan=6, column=6)\n menu2 = Label(\n self.wrapper1,\n text=\"PAKET ULANG TAHUN \\n 1. Butter Cake \\n 2. French Fries \\n 3. Ice Cream \\n 4. Fanta \\n\\n Rp.30,000\",\n bg=\"lightblue\",\n ).grid(row=1, rowspan=6, column=7)\n\n # Menampilkan Isi di table dari database\n\n tree_scrollbar = Scrollbar(self.wrapper2)\n tree_scrollbar.pack(side=RIGHT, fill=Y)\n trv = Treeview(\n self.wrapper2,\n columns=(1, 2, 3, 4, 5, 6, 7, 8),\n show=\"headings\",\n yscrollcommand=tree_scrollbar.set,\n )\n tree_scrollbar.config(command=trv.yview)\n # Pembentukan kolom Table\n trv.column(1, width=100)\n trv.column(2, width=100)\n trv.column(3, width=50)\n trv.column(4, width=200)\n trv.column(5, width=100)\n trv.column(6, width=100)\n trv.column(7, width=150)\n trv.column(8, width=150)\n # Pemberian nama kolom table\n trv.heading(1, text=\"Nama\")\n trv.heading(2, text=\"Paket\")\n trv.heading(3, text=\"Porsi\")\n trv.heading(4, text=\"Alamat\")\n trv.heading(5, text=\"TGL Pesan\")\n trv.heading(6, text=\"TGL Kirim\")\n trv.heading(7, text=\"Total Harga\")\n trv.heading(8, text=\"No Telp\")\n trv.pack()\n\n # Function Memilih table agar muncul di form\n\n def selector(Event):\n resetForm()\n dataNama.delete(0, END)\n selected = trv.focus()\n values = trv.item(selected, \"values\")\n dataNama.insert(0, values[0])\n myLabel.config(text=values[1])\n if myLabel.cget(\"text\") == \"Pernikahan\":\n paket.set(value=\"1\")\n r1.invoke()\n elif myLabel.cget(\"text\") == \"Ulang Tahun\":\n paket.set(value=\"2\")\n r2.select()\n else:\n messagebox.showerror(\"ERROR\", \"Sesuatu ada yang salah\")\n namaEntry.insert(0, values[0])\n porsiEntry.insert(0, values[2])\n alamatEntry.insert(0, values[3])\n tanggalEntry.config(text=values[4])\n dateTtl.config(text=\"Tanggal yang dipilih : \" + values[5])\n hargaCounter.config(text=\"Total Harga : \" + values[6])\n noEntry.insert(0, values[7])\n\n # Event Listener untuk mendengarkan double klik\n\n trv.bind(\"\", selector)\n\n\nif __name__ == \"__main__\":\n root = Tk()\n application = fl.loginForm(root)\n root.mainloop()\n","repo_name":"tegarjoko/Python-GUI-Project","sub_path":"Final_Menu.py","file_name":"Final_Menu.py","file_ext":"py","file_size_in_byte":18782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35845832639","text":"from socket import *\r\nimport google.protobuf\r\n\r\nimport protobuf.protobuf_modify.message1_pb2 as message\r\nimport traceback\r\nimport google.protobuf.any_pb2\r\n# from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2\r\nfrom RuleBased import *\r\nimport threading\r\n\r\nHOST = '127.0.0.1'\r\nPORT = 6006\r\nBUFSIZ = 1024\r\nADDR = (HOST, PORT)\r\n\r\n\r\nclass clientThread(threading.Thread): # 继承父类threading.Thread\r\n def __init__(self, threadID, name, counter, tcpCliSock):\r\n threading.Thread.__init__(self)\r\n self.threadID = threadID\r\n self.name = name\r\n self.counter = counter\r\n self.tcpCliSock = tcpCliSock\r\n\r\n def run(self): # 把要执行的代码写到run函数里面 线程在创建后会直接运行run函数\r\n # tcpCliSock = socket(AF_INET, SOCK_STREAM)\r\n # tcpCliSock.connect(ADDR)\r\n # 等待Hello\r\n protobufhello = self.tcpCliSock.recv(BUFSIZ)\r\n hello_message = message.Hello()\r\n hello_message.ParseFromString(protobufhello)\r\n print(hello_message)\r\n # seat = hello_message.seat\r\n # print('seat', seat)\r\n print(\"Hello\")\r\n hello_message.code = 1\r\n 
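# --- Framing sketch for the ClientRuleBased.py record above ---
# tcpCliSock.recv(BUFSIZ) can return a partial protobuf message, since TCP
# is a byte stream; the commented-out 4-byte "Head_data" read in the record
# hints at the usual fix, length-prefixed framing. A minimal receive-side
# sketch (the framing convention itself is an assumption):
import socket


def recv_exact(sock: socket.socket, n: int) -> bytes:
    buf = b""
    while len(buf) < n:
        chunk = sock.recv(n - len(buf))
        if not chunk:
            raise ConnectionError("socket closed mid-message")
        buf += chunk
    return buf


def recv_framed(sock: socket.socket) -> bytes:
    length = int.from_bytes(recv_exact(sock, 4), byteorder="big")
    return recv_exact(sock, length)  # then message.ParseFromString(...)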
self.tcpCliSock.send(hello_message.SerializeToString())\r\n while True:\r\n # Head_data = tcpCliSock.recv(4) # 接收数据头 4个字节,\r\n # data_len = int.from_bytes(Head_data, byteorder='big')\r\n # print(\"入座成功\")\r\n print(\"is recving\")\r\n protobufdata = self.tcpCliSock.recv(BUFSIZ)\r\n print(\"recv\")\r\n game_state_message = message.GameState() # 读取GameState\r\n game_state_message.ParseFromString(protobufdata)\r\n # print('message:',game_state_message)\r\n player = game_state_message.who\r\n # assert player == seat\r\n if player == 0:\r\n # Declare\r\n card = declarer(game_state_message, \"hard\")\r\n # 发送\r\n elif player == 1:\r\n card = lopp(game_state_message, \"hard\")\r\n elif player == 2:\r\n card = dummy(game_state_message, \"hard\")\r\n else:\r\n card = ropp(game_state_message, \"hard\")\r\n play = message.Play()\r\n play.who = player\r\n play.card.CopyFrom(card)\r\n self.tcpCliSock.send(play.SerializeToString())\r\n print(\"play\")\r\n\r\n# tcpCliSock.close()\r\nthread_num = 1\r\nthread_list = []\r\ntcpCliSock = []\r\nfor i in range(thread_num):\r\n tcpCliSocki = socket(AF_INET, SOCK_STREAM)\r\n tcpCliSocki.connect(ADDR)\r\n thread_list.append(clientThread(i+1, \"Thread-\"+str(i), i+1, tcpCliSocki))\r\n tcpCliSock.append(tcpCliSocki)\r\n thread_list[i].start()\r\n\r\n","repo_name":"MS-ASE-2020/team-aigame","sub_path":"src/ModelClient/ClientRuleBased.py","file_name":"ClientRuleBased.py","file_ext":"py","file_size_in_byte":2748,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"42110605860","text":"#!/usr/bin/env python\n\"\"\" Load in the dbVar data and output summary\nstatistics.\n\nNCBI hackathon dbVar group 2016\n\"\"\"\n\nimport timeit, glob\nimport sys\nimport multiprocessing as mp\nimport ConfigParser\n\nimport pandas as pd\nimport numpy as np\n\nfrom utils import (\n filter_by_size,\n reverse_dictionary,\n generate_unique_mapping,\n groupby_study_numba,\n generate_unique_mapping_numba,\n )\nfrom generate_report import generate_report\n\nfrom IPython import embed\n\n\n\ndef main(): \n config = ConfigParser.RawConfigParser()\n config.read(sys.argv[1])\n gpath = config.get('input', 'make_ref') \n size_limit = config.getfloat('params', 'max_size')\n files = glob.glob(gpath + \"tab/*.txt\")\n studies_include = config.get('params', 'studies_include')\n studies_exclude = config.get('params', 'studies_exclude').split(\",\")\n vartype_f = config.get('params', 'var_type')\n if studies_include == '' or studies_include == None:\n studies_include = []\n else:\n studies_include = studies_include.split(\",\")\n filtered = []\n start = timeit.default_timer()\n pool = mp.Pool(8)\n files = files[0:20]\n studies = [i.split(\"/\")[-1].rstrip(\".txt\") for i in files]\n for i in files:\n study = i.split(\"/\")[-1].rstrip(\".txt\")\n if study in studies_exclude: pass\n else:\n if (len(studies_include) == 0) or (study in studies_include):\n reader = pd.read_csv(i, sep=\"\\t\", \n index_col=0, dtype={'chr':'S5'})\n pool.apply_async(filter_by_size, [reader, study],\n {'max_size': size_limit},\n callback = lambda x: filtered.append(x))\n else: pass\n # Remove duplicated elements\n ###### Step takes around 7 minutes ###################\n pool.close()\n pool.join()\n df = pd.concat(filtered)\n print(vartype_f)\n stop = timeit.default_timer()\n print('Time to load in files and parse: {0!s}'.format(stop-start))\n p_studies = set(df.study)\n non_passed = []\n for i in studies:\n if i not in p_studies:\n non_passed.append(i)\n print(('Studies that 
had no variants that did '\n 'not pass size filtering:{0}').format(\"\\t\".join(non_passed)))\n ############## HACK for now until we find out what is going on #\n # Get rid of the contigs for now\n df = df.ix[df.contig.isnull(), :]\n # The GRc37 to 38 multiple mapping isn't resolved need to discuss how to \n # deal with this\n df = df.ix[np.logical_not(df.index.duplicated()),:]\n # :TODO if sstart and sstop are the same, no\n # matter if it was originally annotated as inner_start\n # or inner stop it will be collapsed\n # For now since, ignore fuzzy \n dfd = df.drop_duplicates(['chr', 'var_type',\n 'sstart', 'sstop'], inplace=False)\n new_unique_index = np.arange(dfd.shape[0])\n dfd.loc[:,'uID'] = new_unique_index\n print('new index created')\n # This step takes forever\n start = timeit.default_timer()\n groups = df.groupby('chr')\n unique_mapping = []\n pool = mp.Pool(8)\n for name, group in groups:\n pool.apply_async(generate_unique_mapping,\n args = (dfd.ix[dfd.chr == name,:], group), \n callback=lambda x: unique_mapping.append(x))\n '''\n tgroup = dfd.ix[dfd['chr'] == name,]\n pool.apply_async(generate_unique_mapping_numba,\n args = (group.sstart.values, \n group.sstop.values, \n tgroup.sstart.values, \n tgroup.sstop.values, \n tgroup.index.values),\n callback=lambda x: unique_mapping.append(pd.Series(x,\n index = group.index)))\n '''\n pool.close()\n pool.join()\n ns = pd.concat(unique_mapping)\n print('Time to generate mapping: {0!s}'.format((stop-start)))\n df['uID'] = ns\n report_dict = {}\n nstudies = config.getint('params', 'nstudies')\n start = timeit.default_timer()\n output = np.zeros(dfd.uID.shape[0], dtype=bool)\n embed()\n std_filter = groupby_study_numba(df.uID.values, df.study, \n output, nstudies=nstudies) \n print(np.sum(std_filter))\n dfd = dfd.ix[std_filter,:]\n df = df.ix[df.uID.isin(dfd.uID),:]\n dfd.to_csv(gpath + 'filtered_no_dupes.txt', sep=\"\\t\")\n df.to_csv(gpath + 'study_filtered_all.txt', sep=\"\\t\")\n print('Time to run: {0!s}'.format(stop - start))\n groups = dfd.groupby('var_type')\n from plot import plot_dists\n generate_report(report_dict)\n rpath = config.get('output', 'report_dir')\n for name, group in groups:\n plot_dists(group.sstop - group.sstart, name,\n rpath)\n type_count = dfd.groupby('var_type').agg(lambda x:\n x.shape[0]).loc[:, ['chr']]\n var_percent = type_count.ix[:,0]/float(dfd.shape[0])*100\n type_count['var_percent'] = var_percent\n type_count['var_percent'].round(2)\n report_dict['var_type_pivot'] = type_count.to_html()\n report_dict['studies'] = []\n report_dict['var_types'] = [name for name, _ in groups]\n generate_report(report_dict)\n\n\n\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"NCBI-Hackathons/Structural_Variant_Comparison","sub_path":"py/analytics.py","file_name":"analytics.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"40"} +{"seq_id":"18350320123","text":"#!/usr/bin/python3\n\"\"\"\nThe module opens a yaml file and updates the value of a passed key.\n\nUpdates all instances of the passed key found within the entire file or within a specific top\nlevel key\n\"\"\"\nimport yaml\nimport argparse\n\n\ndef update_yaml_file(filename, new_config):\n \"\"\"\n Update a yaml file with new configuration details.\n\n Paramters\n ---------\n filename : STRING\n path to the file to be opened.\n new_config : STRING\n new confiruation details to add/updated the file with.\n\n Returns\n -------\n None.\n \"\"\"\n try:\n with 
open(filename, 'w') as f:\n yaml.dump(new_config, f)\n except Exception as e:\n print(str(e))\n\n\ndef open_yaml_file(filename):\n \"\"\"\n Open a yaml file to extract configuration details.\n\n Paramters\n ---------\n filename : STRING\n path to the file to be opened.\n\n Returns\n -------\n config : STRING\n configuration details located in the file.\n \"\"\"\n try:\n with open(filename, 'r') as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n return config\n except Exception as e:\n print(str(e))\n\n\ndef update_value_for_key(dictionary, key, value, top_level_key=None):\n \"\"\"\n Recursive key_value search.\n\n Search through any nested dictionary with a passed key and\n modify all current values to a new value passed with it.\n\n Paramters\n ---------\n dictionary: dict to search through\n key: searched key\n value: new value for searched key\n top_level_key: to search only within this top level key if given\n\n Returns\n -------\n dictionary: updated dictionary\n \"\"\"\n if top_level_key:\n for i in dictionary:\n if i == top_level_key:\n for j in dictionary[top_level_key]:\n for x in update_value_for_key(j, key, value):\n x = value\n yield dictionary\n else:\n yield dictionary\n else:\n if isinstance(dictionary, list):\n for i in dictionary:\n for x in update_value_for_key(i, key, value):\n x = value\n yield dictionary\n elif isinstance(dictionary, dict):\n if key in dictionary:\n dictionary[key] = value\n yield dictionary\n for j in dictionary.values():\n for x in update_value_for_key(j, key, value):\n x = value\n yield dictionary\n\n\ndef merge_list_of_dicts(list_of_dicts):\n \"\"\"\n Merge list of dicts into one dict.\n\n Paramters\n ---------\n list_of_dicts : STRING\n list of dicts to be merged.\n\n Returns\n -------\n merged_dict : STRING\n \"\"\"\n merged_dict = {k: v for d in list_of_dicts for k, v in d.items()}\n return merged_dict\n\n\ndef main(filename, item_key, item_value, top_level_key):\n \"\"\"\n Update key parameter in a yaml file with a new value.\n\n Parameters\n ----------\n filename : STRING\n path to the file to be opened.\n item_key : STRING\n searched key.\n item_value : STRING\n new value of searched key.\n top_level_key : STRING\n limit search to top level only within the dictionary.\n\n Returns\n -------\n None.\n\n \"\"\"\n config = open_yaml_file(filename)\n # update config paramter\n new_config = merge_list_of_dicts(list(update_value_for_key\n (config, item_key, item_value,\n top_level_key)))\n update_yaml_file(filename, new_config)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Update parameter in yaml configuration file')\n parser.add_argument(\"--file\", help=\"yaml file to update\", required=True)\n parser.add_argument(\"--item_key\", help=\"parameter key to update\", required=True)\n parser.add_argument(\"--item_value\", help=\"value of the paramter key to update\", required=True)\n parser.add_argument(\"--top_level_key\", default=None,\n help=\"top level key to limit the search to\")\n args = parser.parse_args()\n main(args.file, args.item_key, args.item_value, args.top_level_key)\n","repo_name":"netfoundry/mop","sub_path":"docs/api/python/source/utility/update_parameter_yaml_file.py","file_name":"update_parameter_yaml_file.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"11112482380","text":"\"\"\"\n1. 
Write a program that takes a real number as input and prints the sum of its digits.\n\"\"\"\n\nfloat_num = float(input('Enter a float number: '))\nlen_of_number = len(str(float_num).replace('.', ''))\nint_num = int(float_num * 10 ** (len_of_number - 1))\nresult = 0\n\nwhile int_num > 0:\n remainder = int_num % 10\n result += remainder\n int_num //= 10\n\nprint(result)\n","repo_name":"Alena0605/GB_Python_Seminars","sub_path":"Seminar_2/HW/2-1.py","file_name":"2-1.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"2172606256","text":"\"\"\"\nAt a lemonade stand, each lemonade costs $5.\n\nCustomers are standing in a queue to buy from you, and order one at a time (in the order specified by bills).\n\nEach customer will only buy one lemonade and pay with either a $5, $10, or $20 bill. You must provide the correct change to each customer, so that the net transaction is that the customer pays $5.\n\nNote that you don't have any change in hand at first.\n\nReturn true if and only if you can provide every customer with correct change.\n\"\"\"\nfrom typing import List\n\n\ndef lemonade_change(bills: List[int]) -> bool:\n money = {5: 0, 10: 0, 20: 0}\n\n for bill in bills:\n money[bill] += 1\n charge = bill - 5\n if charge != 0:\n if charge == 5:\n if money[charge] != 0:\n money[charge] -= 1\n else:\n return False\n if charge == 15:\n if money[10] >= 1 and money[5] >= 1:\n money[10] -= 1\n money[5] -= 1\n elif money[5] >= 3:\n money[5] -= 3\n else:\n return False\n return True\n\n\nif __name__ == '__main__':\n print(lemonade_change([5, 5, 5, 10, 20]))\n print(lemonade_change([5, 5, 10]))\n print(lemonade_change([10, 10]))\n print(lemonade_change([5, 5, 10, 10, 20]))\n","repo_name":"ervitis/challenges","sub_path":"leetcode/lemonade_change/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"72798195001","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\nimport os\r\n\r\ndef fun_Filter_Data(log):\r\n filter_ls = []\r\n with open(\"data/{0}\".format(log)) as f:\r\n for line in f:\r\n try:\r\n val = line.replace(\"\\n\", \"\").split(\"msec = \")[1]\r\n if float(val) > 25:\r\n filter_ls.append(line)\r\n except Exception as e:\r\n pass\r\n\r\n with open(\"result/{0}\".format(log.split(\"_\")[1]),\"a\") as f:\r\n for line in filter_ls:\r\n f.write(line)\r\n\r\ndef del_file(path):\r\n file_ls = os.listdir(path)\r\n for file in file_ls:\r\n file_path = os.path.join(path, file)\r\n if os.path.isdir(file_path):\r\n del_file(file_path)\r\n else:\r\n os.remove(file_path)\r\n\r\nif __name__ == \"__main__\":\r\n pwd = os.getcwd()\r\n file_list = os.listdir(pwd+\"/data/\")\r\n del_file(pwd+\"/result/\")\r\n for file in file_list:\r\n fun_Filter_Data(file)\r\n","repo_name":"edenpython/LogFilter","sub_path":"log_filter.py","file_name":"log_filter.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"38202452958","text":"#!/usr/bin/env python3\n\nimport shutil\nimport re\nfrom . 
import elements\n\n__strip_ANSI_escapes = re.compile(r\"\"\"\n \\x1b # literal ESC\n \\[ # literal [\n [;\\d]* # zero or more digits or semicolons\n [A-Za-z] # a letter\n \"\"\", re.VERBOSE).sub\n\n\ndef printlen(s):\n \"\"\"\n Return the printed length of a string\n \"\"\"\n return len(__strip_ANSI_escapes(\"\", s))\n\n\ndef crop_to_printlen(s, l):\n \"\"\"Return only as many characters such that the printed length\n of them is less than or equal l\"\"\"\n if printlen(s) <= l:\n return s\n i = l\n while printlen(s[:i]) < l:\n i += 1\n return s[:i]\n\n\n# Flags to influence the way basis sets are listed and how thay should be transformed\n# to kwargs for colorise or print_basissets\n__display_formats_base = {\n \"elements\": \"show_elements\",\n \"colour\": \"use_colour\",\n \"crop\": \"crop_fields\",\n}\n\n\n# Make sure that both positive and negative versions exist:\navailable_display_formats = [f for b in __display_formats_base for f in (\"no-\" + b, b)]\n\n\n# Ansi colour escape sequences\ncolours_ANSI = {\n \"yellow\": '\\033[93m',\n \"white\": '\\033[0m',\n \"green\": '\\033[92m',\n \"red\": '\\033[91m',\n \"cyan\": '\\033[96m',\n}\n\n\ndef colorise(string, colour, **kwargs):\n \"\"\"\n Use ANSI colour sequences to print a string in colour\n\n colour Colour to use for printing\n kwargs A list of keyword arguments, most importantly\n if use_colour=False, than colourised printing is turned off.\n \"\"\"\n if not kwargs.get(\"use_colour\", True) or colour is None:\n return string\n else:\n return colours_ANSI[colour] + string + colours_ANSI[\"white\"]\n\n\ndef parse_format_flags(format_flags):\n \"\"\"Parse the format for the list_basissets function\n and return a dictionary key -> value from it\"\"\"\n ret = {}\n\n def negate_flag(flag):\n return flag[3:] if flag.find(\"no-\") == 0 else \"no-\" + flag\n\n for flag in format_flags:\n if flag in __display_formats_base:\n kw = __display_formats_base[flag]\n ret[kw] = True\n elif negate_flag(flag) in __display_formats_base:\n kw = __display_formats_base[negate_flag(flag)]\n ret[kw] = False\n else:\n raise ValueError(\"Invalid format flag: {}\".format(flag))\n return ret\n\n\ndef print_basissets(findings, highlight_atnums=[],\n show_elements=False, use_colour=True, crop_fields=True,\n source_to_colour=None):\n \"\"\"\n Pretty print the basissets in the list\n highlight_atnums Highlight these elements in the list\n show_elements Print the list of elements\n use_colour Use colour for printing\n crop_fields Crop the output if it is too wide\n source_to_colour Mapping from the source of the basis set\n to an appropriate colour\n \"\"\"\n # Get IUPAC element list\n elem_list = elements.IUPAC_LIST\n\n def format_element_list(basset):\n \"\"\"\n Take a basis set dictionary and return a formatted string\n of the element list, taking the list of atnums to highlight into account.\n \"\"\"\n atnum_symbols = [(e[\"atnum\"], elem_list[e[\"atnum\"]][\"symbol\"])\n for e in basset[\"atoms\"]]\n return \",\".join(colorise(sym, \"yellow\", use_colour=use_colour)\n if highlight_atnums and atnum in highlight_atnums\n else sym for atnum, sym in atnum_symbols)\n\n # Determine maximal lengths of the strings we have:\n maxlen_name = max(1, max(len(bset[\"name\"]) for bset in findings))\n maxlen_descr = max(1, max(len(bset[\"description\"]) for bset in findings))\n\n # Ignore element string length if we don't care\n if show_elements:\n maxlen_elem = max(printlen(format_element_list(bset)) for bset in findings)\n else:\n maxlen_elem = 0\n\n # Adjust depending on 
width of terminal\n cols, _ = shutil.get_terminal_size(fallback=(120, 50))\n cols = max(120, cols)\n extra = 4 # What we need for column separators, ...\n\n if maxlen_name + maxlen_descr + maxlen_elem + extra > cols:\n # We don't crop the name ever, so compute the remainder:\n rem = cols - maxlen_name - extra\n\n if show_elements:\n # 2/3 for description, but only if its needed\n # and at least 1/3 for elements:\n maxlen_descr = min(maxlen_descr, max(50, 2 * rem // 3, rem - maxlen_elem - 1))\n maxlen_elem = max(50, rem - maxlen_descr)\n else:\n maxlen_descr = rem\n maxlen_elem = 0\n\n for bset in findings:\n # Maxlen values for this basis set\n # if colour is used, these values need to be altered\n # since ANSI colour escapes produce no \"length\" but count as a char\n maxlen = {\"name\": maxlen_name, \"description\": maxlen_descr,\n \"elements\": maxlen_elem}\n\n fargs = {\n \"description\": bset[\"description\"],\n \"elements\": format_element_list(bset),\n \"name\": bset[\"name\"],\n }\n if source_to_colour:\n fargs[\"name\"] = colorise(fargs[\"name\"], source_to_colour.get(bset[\"source\"]),\n use_colour=use_colour)\n\n if crop_fields:\n for key in fargs:\n if printlen(fargs[key]) > maxlen[key]:\n fargs[key] = crop_to_printlen(fargs[key], maxlen[key] - 3)\n if key in [\"elements\"]:\n # Remove the half-printed element number after the last \",\"\n icomma = fargs[key].rfind(\",\")\n fargs[key] = fargs[key][:icomma] + \"...\"\n else:\n fargs[key] += \"...\"\n\n for key in fargs:\n maxlen[key] += len(fargs[key]) - printlen(fargs[key])\n\n # Build format string:\n fstr = \"{name:\" + str(maxlen[\"name\"]) + \"s}\"\n fstr += \" {description:\" + str(maxlen[\"description\"]) + \"s}\"\n if show_elements:\n fstr += \" {elements:\" + str(maxlen[\"elements\"]) + \"s}\"\n\n print(fstr.format(**fargs))\n","repo_name":"mfherbst/look4bas","sub_path":"look4bas/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"5213901576","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom typing import Dict, Tuple, List, Set, Union, Optional\n\nfrom data_structure import Graph\nfrom experiments.evaluation_metrics import DataNodeMode\nfrom semantic_modeling.assembling.autolabel.align_graph import align_graph\nfrom semantic_modeling.assembling.autolabel.maxf1 import numbering_link_labels, get_numbered_link_label\n\n\ndef preserved_structure(gold_sm: Graph, pred_sm: Graph, gold_triples: Set[Tuple[int, bytes, Union[bytes, int]]]\n ) -> Tuple[Dict[int, bool], Dict[int, Optional[int]]]:\n alignment = align_graph(gold_sm, pred_sm, DataNodeMode.IGNORE_DATA_NODE)\n bijections = alignment['_bijections']\n best_bijection = None\n best_link2label = None\n best_score = -1\n\n # build example from this candidate model\n for bijection in bijections:\n link2label = {}\n for node in pred_sm.iter_class_nodes():\n outgoing_links = list(node.iter_outgoing_links())\n for link in outgoing_links:\n dest_node = link.get_target_node()\n if dest_node.is_class_node():\n dest_label = bijection.prime2x[link.target_id]\n else:\n dest_label = dest_node.label\n\n triple = (bijection.prime2x[link.source_id], link.label, dest_label)\n link2label[link.id] = triple in gold_triples\n score = sum(link2label.values())\n if score > best_score:\n best_score = score\n best_bijection = bijection\n best_link2label = link2label\n\n return best_link2label, 
best_bijection.prime2x\n","repo_name":"binh-vu/semantic-modeling","sub_path":"pysm/semantic_modeling/assembling/autolabel/preserved_structure.py","file_name":"preserved_structure.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"42057155710","text":"#21 BlackJack\r\n#Author: Ayesha Khalid\r\n#Description: User vs computer in a game of 21 BlackJack\r\n\r\n#assign your variables \r\ntotalPoints=0\r\nuserLoss=0\r\nuserWins=0\r\nuserTie=0\r\ncompTie=0\r\ncompLoss = 0\r\ncompWins= 0\r\n\r\nimport random\r\nimport time\r\n\r\n#defining suits for random suit generator \r\ndef suits ():\r\n suit=random.randint(1,4)\r\n if suit == 1:\r\n return \"♦\"\r\n elif suit == 2:\r\n return \"♥\"\r\n elif suit == 3:\r\n return \"♣\"\r\n else:\r\n return \"♠\"\r\nsuitType=suits() \r\n#defining cards \r\ndef drawCard ():\r\n card=random.randint(2,14)\r\n if card<=10:\r\n return card\r\n elif card==11:\r\n return'J'\r\n elif card==12:\r\n return 'Q'\r\n elif card==13:\r\n return 'K'\r\n else:\r\n return 'A'\r\n#defining points\r\ndef points (card):\r\n if card == \"J\" or card ==\"Q\" or card ==\"K\":\r\n return 10\r\n elif card == \"A\":\r\n if totalPoints >= 11:\r\n return 1\r\n if totalPoints == 10:\r\n return 11\r\n if totalPoints < 10:\r\n userIn= input(\"\\n You have drawn an Ace. Do you want the ace to be a 1 or a 11? \")\r\n if userIn == \"1\":\r\n #if user selects 1 then 1 would be added to total points\r\n return 1\r\n elif userIn == \"11\":\r\n #if user selects 11 then 11 would be added to total points\r\n return 11\r\n elif card <= 10:\r\n return card\r\n\r\ncomputerPoints=0\r\n#defining computer points\r\ndef comPoints (card1):\r\n if card1 == \"J\" or card1 ==\"Q\" or card1 ==\"K\":\r\n return 10\r\n elif card1 == \"A\":\r\n if computerPoints >= 11:\r\n return 1\r\n elif computerPoints <= 10:\r\n return 11\r\n elif card1 <= 10:\r\n return card1\r\n \r\n#defining fuction to output user's cards\r\ndef outputCard(a):\r\n return (\"\\n+ ------------- +\" + \"\\n|\\t\\t|\" + \"\\n|\\t\" + str(card)+ str(suitType)+ \"\\t|\" + \"\\n|\\t\\t|\" + \"\\n+ ------------- +\")\r\n\r\n#defining fuction to output computer's cards\r\ndef outputCard1(a):\r\n return (\"\\n+ ------------- +\" + \"\\n|\\t\\t|\" + \"\\n|\\t\" + str(card1)+ str(suitType)+ \"\\t|\" + \"\\n|\\t\\t|\" + \"\\n+ ------------- +\")\r\n\r\n\r\n#welcoming the user to the game\r\nprint(\"\\t\\tWelcome to 21 BlackJack!\\t\\t\")\r\nprint('\\t******ENJOY!!!May the best hand win.******\\t')\r\nhands=int(input(\"\\nHow many hands do you want to play: \"))\r\ngames=hands\r\n\r\n#loops \"hand\" amout of times for user\r\nfor hands in range (0,hands):\r\n cards=2\r\n print('\\nYour hand will begin in 3 seconds get ready to play!!!')\r\n time.sleep(3)\r\n totalPoints= 0\r\n computerPoints=0\r\n print('\\n\\n******NEW HAND!!!******')\r\n for counter in range (0,2):\r\n card = drawCard()\r\n totalPoints = totalPoints + points(card)\r\n print(outputCard(card))\r\n print (\"\\n Your total points are now \" + str(totalPoints))\r\n time.sleep(1.5)\r\n\r\n#while the user points is less than 21, it will ask user to hit or stand \r\n while totalPoints<21:\r\n user=input('\\n Press s for Stand or h for Hit: ')\r\n if user=='H' or user == 'h':\r\n card = drawCard()\r\n totalPoints = totalPoints + points(card)\r\n suitType=suits()\r\n print(outputCard(card))\r\n print (\"\\n Your total points are now \" + str(totalPoints))\r\n cards=cards+1\r\n if cards==5 and 
totalPoints<21:\r\n print('\\n Since you have reached 5 cards you have won this hand. Congratulations!!!')\r\n userWins=userWins+1\r\n break\r\n if totalPoints>21:\r\n break\r\n elif user==\"s\" or user==\"S\":\r\n break\r\n\r\n#checking if user wins or losses\r\n if totalPoints>21:\r\n print('\\n You Bust! Sorry you lost this hand. Therefore, the computer wins this hand.')\r\n userLoss= userLoss+1\r\n compWins= compWins+1\r\n elif totalPoints==21:\r\n print('\\n Congratulations you won this hand. Therefore, the computer loses this hand. ')\r\n userWins =userWins + 1\r\n compLoss=compLoss+1\r\n\r\n#if user chooses to stand, then the computer will be dealt the cards and play it's hand\r\n else:\r\n print(\"\\n Now the computer will be dealt cards\")\r\n time.sleep(2)\r\n\r\n #loop for the 2 starting hand cards\r\n for counter in range (0,2):\r\n card1 = drawCard()\r\n computerPoints = computerPoints + comPoints(card1)\r\n suitType=suits()\r\n print(outputCard1(card1))\r\n print (\"\\n The computer's total points are now \" + str(computerPoints))\r\n time.sleep(1.5)\r\n \r\n #while computer points is less than user's total points it will keep outputting cards \r\n while computerPoints<totalPoints and computerPoints<21:\r\n card1 = drawCard()\r\n computerPoints = computerPoints + comPoints(card1)\r\n suitType=suits()\r\n print(outputCard1(card1))\r\n print (\"\\n The computer's total points are now \" + str(computerPoints))\r\n time.sleep(1.5)\r\n\r\n #checking if computer wins or losses\r\n if computerPoints>totalPoints and computerPoints<21:\r\n compWins=compWins+1\r\n userLoss=userLoss+1\r\n print('\\n The computer wins this hand!!!')\r\n elif computerPoints>21:\r\n print('\\n The computer bust therefore user wins this hand!!!')\r\n userWins= userWins+1\r\n compLoss= compLoss+1\r\n elif computerPoints==totalPoints:\r\n print('\\n You and the computer have tied')\r\n userTie=userTie+1\r\n compTie=compTie+1\r\n elif computerPoints==21:\r\n print('The computer has 21 points, therefore the computer wins')\r\n compWins=compWins+1\r\n userLoss=userLoss+1\r\n \r\n #outputting the scoreboard \r\n time.sleep(1.5) \r\n print ('\\n '+\"*\" * 55)\r\n print(\" User wins: \"+str(userWins)+ \"|\" + \" User ties: \"+str(userTie)+\"|\" + \" User losses: \"+ str(userLoss))\r\n print(' '+\"*\" * 55)\r\n print(\" Computer wins: \"+str(compWins)+ \"|\" + \" Computer ties: \"+str(compTie)+\"|\" +\" Computer losses: \" + str(compLoss))\r\n print(\" \" + \"*\" * 55)\r\n\r\n #asking user if they want to quit the next hands\r\n time.sleep(2)\r\n games=games-1\r\n if games!=0:\r\n userQuit=input('\\n Do you want to quit, Yes or No:')\r\n if userQuit=='yes' or userQuit==\"Yes\" or userQuit=='y' or userQuit==\"Y\":\r\n print(\"\\n Hope you had fun playing BlackJack, can't wait to see you again!!!\")\r\n #if user quits they break out of the loop, resulting in ending the loop\r\n break\r\n elif games==0:\r\n print(\"\\n You have played all your hands. 
Hope you had fun playing BlackJack, can't wait to see you again!!!\")\r\n \r\n","repo_name":"AyeshaKhalid01/BlackJack","sub_path":"21-BlackJack.py","file_name":"21-BlackJack.py","file_ext":"py","file_size_in_byte":6708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"}
{"seq_id":"1705296830","text":"# Importing the library \nfrom tkinter import*\n\n# Creating the window \njanela = Tk()\n\n# Window size \njanela.geometry(\"300x300\")\n\n# Setting a title \njanela.title(\"Pimeira Interface com Python\")\n\n\n# Placing a text inside the window \ntexto =Label(janela, text=\"Olá mundo!\")\n# Displaying the text in the window \ntexto.pack()\n\n\n# Displaying the screen \njanela.mainloop()","repo_name":"Anaportfolio/Projetos_com_Python","sub_path":"Interface/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"74772732281","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import KMeans\n\nfrom pprint import pprint\n\nfrom nltk.corpus import stopwords\nimport nltk.tokenize as tk\nfrom nltk.stem import WordNetLemmatizer\nfrom gensim.models import Word2Vec\n\nimport logging\nimport sys\n\nfrom encoder import infersent as inf\n\nfmt = \"[%(filename)s:%(lineno)s - %(funcName)s()] %(message)s\"\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO, format=fmt)\nlogger = logging.getLogger(__name__)\n\ndef get_stopwords():\n with open('stopwords.txt', 'r') as file:\n stop_words = set(file.read().split(\"\\n\"))\n logger.debug(stop_words)\n \n return stop_words\n\ndef text_preprocessing(txt):\n #tokenizes into sentences\n sentences = tk.sent_tokenize(txt)\n logger.debug(sentences)\n \n #get stop_words\n stop_words = get_stopwords()\n \n #tokenizes into words\n lemmatizer = WordNetLemmatizer()\n tokenizer = tk.RegexpTokenizer(r'\\w+')\n wordsList = [[lemmatizer.lemmatize(w.lower()) for w in tokenizer.tokenize(sentence) if not w in stop_words and not w.isdigit() and not len(w) <= 2] for sentence in sentences]\n \n #deletes unimportant sentences\n for i in range(len(wordsList)):\n if not wordsList[i]:\n sentences.pop(i)\n\n #deletes empty sentences\n wordsList = [x for x in wordsList if x]\n logger.debug(wordsList[0])\n \n return sentences, wordsList\n\ndef get_mean_vector(word2vec_model, words):\n # remove out-of-vocabulary words\n words = [word for word in words if word in word2vec_model.wv.vocab]\n logger.debug(words)\n if len(words) >= 1:\n return np.mean(word2vec_model[words], axis=0)\n else:\n return []\n\ndef wordvec_feature_extraction(wordsList):\n model = Word2Vec(wordsList, min_count=1)\n vectors = model[model.wv.vocab]\n logger.debug(vectors)\n \n return model, vectors\n\ndef sentvec_feature_extraction(wordsList):\n model = Sent2Vec(wordsList, size=100, min_count=1)\n\ndef kmeans(sentences, wordsList, k):\n wordvec_model, vectors = wordvec_feature_extraction(wordsList)\n \n kmeansmodel = KMeans(n_clusters=k, init='k-means++', max_iter=100, n_init=1)\n kmeansmodel.fit(vectors)\n \n order_centroids = kmeansmodel.cluster_centers_.argsort()[:, ::-1]\n terms = list(wordvec_model.wv.vocab)\n \n for i in range(k):\n logger.info(\"Cluster {}:\".format(i))\n for ind in order_centroids[i, :10]:\n logger.info(' {}'.format(terms[ind]))\n \n predictions = [(i, kmeansmodel.predict([get_mean_vector(wordvec_model, wordsList[i])])[0]) for i in range(len(sentences))]\n predictions.sort(key=lambda tup: 
(tup[1], tup[0]))\n logger.info(predictions)\n return predictions\n\ndef infersent_embedding(wordsList, sentences):\n model = inf.infersent_train(sentences)\n \n embedding = model.encode(sentences)\n logger.info(embedding)\n \n kmeansmodel = KMeans(n_clusters=10, init='k-means++', max_iter=100, n_init=1)\n kmeansmodel.fit(embedding)\n \n logger.info(model.encode([\"George Washington (February 22, 1732[b] – December 14, 1799) was an American political leader, military general, statesman, and Founding Father who served as the first president of the United States from 1789 to 1797.\"]))\n \n predictions = [(i, kmeansmodel.predict(model.encode([sentences[i]]))[0]) for i in range(len(sentences))]\n #predictions.sort(key=lambda tup: (tup[1], tup[0]))\n \n logger.info(predictions)\n \ndef main():\n \n #opens test text\n with open('test.txt', 'r') as file:\n testtxt = file.read().replace(\"\\n\", \"\")\n\n sentences, wordsList = text_preprocessing(testtxt)\n infersent_embedding(wordsList, sentences)\n \n predictions = kmeans(sentences, wordsList, 10)\n \n with open('result.txt', 'w') as file:\n current = 0\n for i, p in predictions:\n if p > current:\n file.write(\"\\n\\n\")\n current += 1\n \n file.write(sentences[i])\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"ayang923/text_clustering","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"39676602682","text":"# Petrol is collected for Indian Oil Corporation for sales from nearest ‘n’ storage points to the Collection point.\n# Given the amount of petrol from ‘n’ storage points in liters(L) and milli liters (mL), write a PAC chart, flowchart, algorithm and python code to compute the total quantity of oil in the collection point.\n# For example, if oil comes from 3 bunks in quantities 2 L 300 mL, 3 L 700 mL and 4 L 600 mL then the total quantity of oil in collection is 10 L 600 mL.\n\nstorage_points = int(input())\nquantity_list = []\ntotal_quantity = 0\n\nfor i in range(storage_points):\n L_qty = int(input())\n mL_qty = int(input()) * 0.001\n qty = L_qty + mL_qty\n total_quantity += qty\n \nL_total_qty = int(total_quantity)\nmL_total_qty = int(round((total_quantity - L_total_qty) * 1000, 0))\n\nprint(L_total_qty)\nprint(mL_total_qty)\n","repo_name":"KalkiEshwarD/VIT_BCSE101E","sub_path":"PPS1/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"19605937022","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 17 16:37:07 2018\n\n@author: 123\n\"\"\"\nimport telebot\nbot = telebot.TeleBot('481180883:AAFjGPMJVPKL0xaH2MksG4GlH5bprwwxdpI')\n\n\n@bot.message_handler(commands = ['start'])\ndef start_bot(message):\n bot.send_message(message.chat.id, \"Привет\")\n \n \nif __name__ == '__main__':\n bot.polling(none_stop = True) ","repo_name":"vrybakov/chat-bot01","sub_path":"test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"3904003455","text":"import sys\nsys.stdin = open(\"2.txt\",\"r\")\n\nloc = input()\nrow = int(loc[1])\ncol = int(ord(loc[0])) - int(ord('a')) + 1\n\nmoves = [[-1,-2], [-1,2], [-2,-1], [-2,1], [1,-2], [1,2], [2,-1], [2,1]]\n\nresult = 0\nfor move in moves:\n result_x = row + move[0]\n result_y = col + move[1]\n if (0 
< result_x <= 8) and (0 < result_y <= 8):\n result += 1\nprint(result)\n","repo_name":"hanuirangroovy/TIL","sub_path":"이것이코딩테스트다with파이썬/구현/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"26108603082","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\n\ndef scale(X):\n samp_size, feat_size = X.shape\n for jt in range(feat_size):\n mean = np.mean(X[:, jt])\n std_dev = np.sqrt(np.var(X[:, jt], ddof=1))\n X[:, jt] = (X[:, jt]-mean) / std_dev\n return X\n\ndef ols(X, Y):\n gram = X.T*X\n beta = gram.I * X.T * Y\n return beta\n\ndef std_dev(X, var):\n samp_size, feat_size = X.shape\n beta_var_mat = var*(X.T*X).I\n return np.mat(np.sqrt(np.diag(beta_var_mat))).T\n \n \ndef eval_mpe(X, Y, beta_hat):\n samp_size, feat_size = X.shape\n Y_hat = X * beta_hat\n error = Y_hat - Y\n error_2 = np.array(error)**2\n# print('tst: ', np.mean(error_2), np.std(error_2, ddof=1)) \n \n# pf = pd.DataFrame({'1':np.array(Y_hat)[:, 0].tolist(),\n# '2':np.array(Y)[:, 0].tolist(),\n# '3':np.array(error)[:, 0].tolist()})\n# print(pf)\n# print(\"here is the mean(err)\", np.sqrt(1/(samp_size-1)*(tmp-np.mean(tmp)).T*(tmp-np.mean(tmp))))\n return 1/samp_size * (error.T*error)[0, 0]\n \n \nif __name__ == '__main__':\n ps = pd.read_table('./prostate.txt', float_precision='high')\n \n full_data = np.mat(ps.iloc[:, 1:-1])\n full_prdt = full_data[:, :-1]\n full_prdt = scale(full_prdt)\n full_resp = full_data[:, -1]\n\n samp_size, feat_size = full_prdt.shape\n print('{0} features, {1} samples'.format(feat_size, samp_size)) \n \n train_label = ps.loc[ps.train=='T'].iloc[:, 0]-1\n test_label = ps.loc[ps.train=='F'].iloc[:, 0]-1\n\n train_size = len(train_label)\n test_size = len(test_label)\n\n if test_size+train_size != samp_size:\n raise AssertionError\n \n trainX = full_prdt[train_label, :]\n trainY = full_resp[train_label, 0]\n\n testX = full_prdt[test_label, :]\n testY = full_resp[test_label, 0]\n\n print('Training samples: {0}\\nTest samples: {1}'.format(train_size, test_size))\n \n cor_mat = np.corrcoef(trainX.T)\n print(trainX.shape)\n print(cor_mat)\n \n trainX = np.concatenate((np.mat(np.ones([train_size, 1])), trainX), axis=1)\n# Add the intercept to the training data\n \n beta_hat = ols(trainX, trainY)\n# print(beta_hat)\n\n trainY_hat = trainX*beta_hat\n error = trainY - trainY_hat\n est_var = 1/(train_size-feat_size-1) * (error.T*error)[0, 0]\n# print(np.sqrt(est_var))\n\n beta_std_dev = std_dev(trainX, est_var)\n# print(beta_dev)\n\n if beta_hat.shape == beta_std_dev.shape:\n Z_score = np.divide(beta_hat, beta_std_dev)\n# print(Z_score)\n else:\n raise AssertionError\n \n labels = list(ps)[1:-2]\n labels.insert(0, 'intercept')\n index = ['Term', 'Coefficient', 'Std. 
Error', 'Z Score']\n df = pd.DataFrame({index[0]:labels,\n index[1]:beta_hat.reshape(1,feat_size+1).tolist()[0],\n index[2]:beta_std_dev.reshape(1, feat_size+1).tolist()[0],\n index[3]:Z_score.reshape(1, feat_size+1).tolist()[0],})\n df = df[index].round({index[1]:2, index[2]:2, index[3]:2})\n print(df)\n \n test_mpe = eval_mpe(trainX, trainY, beta_hat)\n# test_mpe = eval_mpe(np.concatenate((np.mat(np.ones([test_size, 1])), testX), axis=1), testY, beta_hat)\n print('Test Error : {0:0.3f}'.format(test_mpe))\n \n print('Still have not got the meaning of \"Std Error\" on ESL Page 63.')\n \n \n'''\n cavol= trainX[:, 1]\n Vcavol = 1/(train_size-1) * np.dot(cavol.T-np.mean(cavol), cavol-np.mean(cavol))\n Vage = 1/(train_size-1) * np.dot((age.T-np.mean(age)), (age-np.mean(age)))\n Cov_w_c = 1/(train_size-1)*np.dot(age.T-np.mean(age), cavol-np.mean(cavol))\n cor = Cov_w_c/np.sqrt(Vage*Vcavol)\n'''\n ","repo_name":"zhaoyu775885/ml","sub_path":"LR/ols.py","file_name":"ols.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"}
{"seq_id":"7602355408","text":"'''\nQ.1 Build an algorithm that reads a positive three-digit integer and generates another number formed by the reversed digits of the number read. \n Ex: \nNumeroLido = 123 \nNumeroGerado = 321 \nHint: Look at the results of the Quotient and Remainder functions of a number by 10. \n\n\n'''\n\n\ndef main():\n\n while True:\n global num\n num = input(\"Digite um numero inteiro: \")\n \n\n if len(num) == 3:\n reverso()\n print(reverso()[::-1])\n break\n\n elif len(num) < 3:\n print(\"Digite um numero inteiro com 3 digitos\")\n \n \n else:\n print(\"Digite um numero inteiro com 3 digitos\")\n \n\n\ndef reverso():\n \n lista = []\n lista.append(num)\n \n return num\n \n \n\nmain()\n\n\n","repo_name":"Lethycia-dev/Atividades-Curso","sub_path":"uc1/aula 9 - dinamica/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"}
{"seq_id":"18123050317","text":"import cv2\nimport time\nimport numpy as np\nfrom random import randint\nimport argparse\n\nimage_file = \"h1\"\nparser = argparse.ArgumentParser(description=\"Run keypoint detection\")\nparser.add_argument(\"--device\", default=\"cpu\", help=\"Device to inference on\")\nparser.add_argument(\n \"--image_file\", default=\"data_original/\" + image_file + \".png\", help=\"Input image\"\n)\n\nargs = parser.parse_args()\n\n\nimage1 = cv2.imread(args.image_file)\n# image1 = cv2.resize(\n# image1,\n# dsize=(image1.shape[0] * (2.0 / 3), image1.shape[1] * (2.0 / 3)),\n# interpolation=cv2.INTER_CUBIC,\n# )\n\nprotoFile = \"pose/coco/pose_deploy_linevec.prototxt\"\nweightsFile = \"pose/coco/pose_iter_440000.caffemodel\"\nnPoints = 18\n# COCO Output Format\nkeypointsMapping = [\n \"Nose\",\n \"Neck\",\n \"R-Sho\",\n \"R-Elb\",\n \"R-Wr\",\n \"L-Sho\",\n \"L-Elb\",\n \"L-Wr\",\n \"R-Hip\",\n \"R-Knee\",\n \"R-Ank\",\n \"L-Hip\",\n \"L-Knee\",\n \"L-Ank\",\n \"R-Eye\",\n \"L-Eye\",\n \"R-Ear\",\n \"L-Ear\",\n]\n\nPOSE_PAIRS = [[1, 2], [1, 5], [2, 3], [3, 4], [5, 6], [6, 7], [1, 0]]\n\n# index of pafs correspoding to the POSE_PAIRS\n# e.g for POSE_PAIR(1,2), the PAFs are located at indices (31,32) of output, Similarly, (1,5) -> (39,40) and so on.\nmapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [47, 48]]\n\ncolors = [\n [0, 100, 255],\n [0, 100, 255],\n [0, 255, 255],\n [0, 100, 255],\n [0, 255, 
255],\n [0, 100, 255],\n [0, 255, 0],\n [255, 200, 100],\n [255, 0, 255],\n [0, 255, 0],\n [255, 200, 100],\n [255, 0, 255],\n [0, 0, 255],\n [255, 0, 0],\n [200, 200, 0],\n [255, 0, 0],\n [200, 200, 0],\n [0, 0, 0],\n]\npoly_colors = [[255, 0, 0], [0, 255, 0], [0, 0, 255], [0, 0, 0]]\n\ncolor = [0, 0, 0]\n\n\ndef getKeypoints(probMap, threshold=0.1):\n mapSmooth = cv2.GaussianBlur(probMap, (3, 3), 0, 0)\n\n mapMask = np.uint8(mapSmooth > threshold)\n keypoints = []\n\n # find the blobs\n contours, _ = cv2.findContours(mapMask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n # for each blob find the maxima\n for cnt in contours:\n blobMask = np.zeros(mapMask.shape)\n blobMask = cv2.fillConvexPoly(blobMask, cnt, 1)\n maskedProbMap = mapSmooth * blobMask\n _, maxVal, _, maxLoc = cv2.minMaxLoc(maskedProbMap)\n keypoints.append(maxLoc + (probMap[maxLoc[1], maxLoc[0]],))\n\n return keypoints\n\n\n# Find valid connections between the different joints of a all persons present\ndef getValidPairs(output):\n valid_pairs = []\n invalid_pairs = []\n n_interp_samples = 10\n paf_score_th = 0.1\n conf_th = 0.7\n # loop for every POSE_PAIR\n for k in range(len(mapIdx)):\n # A->B constitute a limb\n pafA = output[0, mapIdx[k][0], :, :]\n pafB = output[0, mapIdx[k][1], :, :]\n pafA = cv2.resize(pafA, (frameWidth, frameHeight))\n pafB = cv2.resize(pafB, (frameWidth, frameHeight))\n\n # Find the keypoints for the first and second limb\n candA = detected_keypoints[POSE_PAIRS[k][0]]\n candB = detected_keypoints[POSE_PAIRS[k][1]]\n nA = len(candA)\n nB = len(candB)\n\n # If keypoints for the joint-pair is detected\n # check every joint in candA with every joint in candB\n # Calculate the distance vector between the two joints\n # Find the PAF values at a set of interpolated points between the joints\n # Use the above formula to compute a score to mark the connection valid\n\n if nA != 0 and nB != 0:\n valid_pair = np.zeros((0, 3))\n for i in range(nA):\n max_j = -1\n maxScore = -1\n found = 0\n for j in range(nB):\n # Find d_ij\n d_ij = np.subtract(candB[j][:2], candA[i][:2])\n norm = np.linalg.norm(d_ij)\n if norm:\n d_ij = d_ij / norm\n else:\n continue\n # Find p(u)\n interp_coord = list(\n zip(\n np.linspace(candA[i][0], candB[j][0], num=n_interp_samples),\n np.linspace(candA[i][1], candB[j][1], num=n_interp_samples),\n )\n )\n # Find L(p(u))\n paf_interp = []\n for k in range(len(interp_coord)):\n paf_interp.append(\n [\n pafA[\n int(round(interp_coord[k][1])),\n int(round(interp_coord[k][0])),\n ],\n pafB[\n int(round(interp_coord[k][1])),\n int(round(interp_coord[k][0])),\n ],\n ]\n )\n # Find E\n paf_scores = np.dot(paf_interp, d_ij)\n avg_paf_score = sum(paf_scores) / len(paf_scores)\n\n # Check if the connection is valid\n # If the fraction of interpolated vectors aligned with PAF is higher then threshold -> Valid Pair\n if (\n len(np.where(paf_scores > paf_score_th)[0]) / n_interp_samples\n ) > conf_th:\n if avg_paf_score > maxScore:\n max_j = j\n maxScore = avg_paf_score\n found = 1\n # Append the connection to the list\n if found:\n valid_pair = np.append(\n valid_pair, [[candA[i][3], candB[max_j][3], maxScore]], axis=0\n )\n\n # Append the detected connections to the global list\n valid_pairs.append(valid_pair)\n else: # If no keypoints are detected\n print(\"No Connection : k = {}\".format(k))\n invalid_pairs.append(k)\n valid_pairs.append([])\n return valid_pairs, invalid_pairs\n\n\n# This function creates a list of keypoints belonging to each person\n# For each detected valid pair, it assigns the 
joint(s) to a person\ndef getPersonwiseKeypoints(valid_pairs, invalid_pairs):\n # the last number in each row is the overall score\n personwiseKeypoints = -1 * np.ones((0, 19))\n\n for k in range(len(mapIdx)):\n if k not in invalid_pairs:\n partAs = valid_pairs[k][:, 0]\n partBs = valid_pairs[k][:, 1]\n indexA, indexB = np.array(POSE_PAIRS[k])\n\n for i in range(len(valid_pairs[k])):\n found = 0\n person_idx = -1\n for j in range(len(personwiseKeypoints)):\n if personwiseKeypoints[j][indexA] == partAs[i]:\n person_idx = j\n found = 1\n break\n\n if found:\n personwiseKeypoints[person_idx][indexB] = partBs[i]\n personwiseKeypoints[person_idx][-1] += (\n keypoints_list[partBs[i].astype(int), 2] + valid_pairs[k][i][2]\n )\n\n # if find no partA in the subset, create a new subset\n elif not found and k < 17:\n row = -1 * np.ones(19)\n row[indexA] = partAs[i]\n row[indexB] = partBs[i]\n # add the keypoint_scores for the two keypoints and the paf_score\n row[-1] = (\n sum(keypoints_list[valid_pairs[k][i, :2].astype(int), 2])\n + valid_pairs[k][i][2]\n )\n personwiseKeypoints = np.vstack([personwiseKeypoints, row])\n return personwiseKeypoints\n\n\nframeWidth = image1.shape[1]\nframeHeight = image1.shape[0]\n\nt = time.time()\nnet = cv2.dnn.readNetFromCaffe(protoFile, weightsFile)\nif args.device == \"cpu\":\n net.setPreferableBackend(cv2.dnn.DNN_TARGET_CPU)\n print(\"Using CPU device\")\nelif args.device == \"gpu\":\n net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n print(\"Using GPU device\")\n\n# Fix the input Height and get the width according to the Aspect Ratio\ninHeight = 368\ninWidth = int((inHeight / frameHeight) * frameWidth)\n\ninpBlob = cv2.dnn.blobFromImage(\n image1, 1.0 / 255, (inWidth, inHeight), (0, 0, 0), swapRB=False, crop=False\n)\nnet.setInput(inpBlob)\n\noutput = net.forward()\nprint(\"Time Taken in forward pass = {}\".format(time.time() - t))\n\ndetected_keypoints = []\nkeypoints_list = np.zeros((0, 3))\nkeypoint_id = 0\nthreshold = 0.1\ninterested_joints = [0, 1, 2, 3, 4, 5, 6, 7]\nfor part in interested_joints:\n probMap = output[0, part, :, :]\n probMap = cv2.resize(probMap, (image1.shape[1], image1.shape[0]))\n keypoints = getKeypoints(probMap, threshold)\n print(\"Keypoints - {} : {}\".format(keypointsMapping[part], keypoints))\n keypoints_with_id = []\n for i in range(len(keypoints)):\n keypoints_with_id.append(keypoints[i] + (keypoint_id,))\n keypoints_list = np.vstack([keypoints_list, keypoints[i]])\n keypoint_id += 1\n\n detected_keypoints.append(keypoints_with_id)\n\n# frameClone = image1.copy()\nimage1_height, image1_width, _ = image1.shape\nframeClone = np.ones((image1_height, image1_width, 3), dtype=np.uint8) * 255\n\n\nvalid_pairs, invalid_pairs = getValidPairs(output)\npersonwiseKeypoints = getPersonwiseKeypoints(valid_pairs, invalid_pairs)\n# print(personwiseKeypoints)\n\n\n# connect the detected skeleton joints for each person\nfor i in range(len(POSE_PAIRS) - 1):\n for n in range(len(personwiseKeypoints)):\n index = personwiseKeypoints[n][np.array(POSE_PAIRS[i])]\n # print(personwiseKeypoints[n][0].astype(int))\n if -1 in index:\n continue\n B = np.int32(keypoints_list[index.astype(int), 0])\n A = np.int32(keypoints_list[index.astype(int), 1])\n # dots.append([B[0], A[0]])\n\n cv2.line(frameClone, (B[0], A[0]), (B[1], A[1]), [0, 0, 0], 10, cv2.LINE_AA)\n\n\nedge_list = np.zeros((0, 3))\n\n# 1. store every participant's left and right wrist (or elbow) in a numpy array\nfor person in range(len(personwiseKeypoints)):\n right_wrist_index = (\n personwiseKeypoints[person][4]\n if personwiseKeypoints[person][4] != -1\n else personwiseKeypoints[person][3]\n )\n left_wrist_index = (\n personwiseKeypoints[person][7]\n if personwiseKeypoints[person][7] != -1\n else personwiseKeypoints[person][6]\n )\n edge_list = np.vstack(\n [edge_list, np.int32(keypoints_list[right_wrist_index.astype(int)])]\n )\n edge_list = np.vstack(\n [edge_list, np.int32(keypoints_list[left_wrist_index.astype(int)])]\n )\n\n# 2. scan one by one and find the nearest point\nmatched_index = set()\nfor index in range(len(edge_list)):\n if index in matched_index:\n continue\n matched_index.add(index)\n\n min_dist = np.linalg.norm(np.array([image1_height, image1_width]))\n min_index = -1\n for other_index in range(len(edge_list)):\n # skip hands that are already connected or belong to the same person\n if other_index in matched_index or index // 2 == other_index // 2:\n continue\n cur_dist = np.linalg.norm(edge_list[index] - edge_list[other_index])\n # 3. check the reference point against the nearest point so far\n if cur_dist < min_dist:\n min_index, min_dist = other_index, cur_dist\n\n if min_index == -1:\n continue\n matched_index.add(min_index)\n\n toDots = np.int32(edge_list[index])\n fromDots = np.int32(edge_list[min_index])\n cv2.line(\n frameClone,\n (toDots[0], toDots[1]),\n (fromDots[0], fromDots[1]),\n [0, 0, 0],\n 10,\n cv2.LINE_AA,\n )\n\nframeClone = cv2.resize(frameClone, dsize=(32, 32), interpolation=cv2.INTER_LINEAR)\n\ncv2.imshow(\"Detected Pose\", frameClone)\ncv2.imwrite(\"./data_process/\" + image_file + \".png\", frameClone)\ncv2.waitKey(0)\n","repo_name":"Giyoooon/openpose_multiperson","sub_path":"multi-person-openpose.py","file_name":"multi-person-openpose.py","file_ext":"py","file_size_in_byte":12102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"}
{"seq_id":"39387938867","text":"import os\nimport sys\n\nimport pytest\nfrom httpx import Response\n\nroot_folder = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(root_folder)\n\nfrom task_4 import make_request, request_data\n\n\n@pytest.mark.asyncio\nasync def test_make_request():\n url = \"http://ya.ru\"\n response = await make_request(url)\n assert isinstance(response, Response)\n assert response.status_code == 301\n\n\n@pytest.mark.asyncio\nasync def test_request_data():\n url = \"http://httpbin.org/delay/3\"\n num_requests = 10\n await request_data(url, num_requests)\n\n\n@pytest.mark.asyncio\nasync def test_request_data_with_invalid_url():\n url = \"http://invalid-url\"\n with pytest.raises(Exception):\n await request_data(url)\n\n\nif __name__ == \"__main__\":\n pytest.main()\n","repo_name":"shamankub/rubitech_test","sub_path":"tests/test_task_4.py","file_name":"test_task_4.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"}
{"seq_id":"29945210344","text":"\"\"\"bitbucket reader\"\"\"\n\nfrom typing import List, Optional\n\nimport base64\nimport os\nimport requests\nfrom llama_index.readers.base import BaseReader\nfrom llama_index.readers.schema.base import Document\n\n\nclass BitbucketReader(BaseReader):\n \"\"\"Bitbucket reader.\n\n Reads the content of files in Bitbucket repositories.\n\n \"\"\"\n\n def __init__(\n self,\n base_url: Optional[str] = None,\n project_key: Optional[str] = None,\n branch: Optional[str] = \"refs/heads/develop\",\n extensions_to_skip: Optional[List] = [],\n ) -> None:\n 
\"\"\"Initialize with parameters.\"\"\"\n if os.getenv(\"BITBUCKET_USERNAME\") is None:\n raise ValueError(\"Could not find a Bitbucket username.\")\n if os.getenv(\"BITBUCKET_API_KEY\") is None:\n raise ValueError(\"Could not find a Bitbucket api key.\")\n if base_url is None:\n raise ValueError(\"You must provide a base url for Bitbucket.\")\n if project_key is None:\n raise ValueError(\"You must provide a project key for Bitbucket repository.\")\n self.base_url = base_url\n self.project_key = project_key\n self.branch = branch\n self.extensions_to_skip = extensions_to_skip\n\n def get_headers(self):\n username = os.getenv(\"BITBUCKET_USERNAME\")\n api_token = os.getenv(\"BITBUCKET_API_KEY\")\n auth = base64.b64encode(f\"{username}:{api_token}\".encode()).decode()\n return {\"Authorization\": f\"Basic {auth}\"}\n\n def get_slugs(self) -> List:\n \"\"\"\n Get slugs of the specific project.\n \"\"\"\n repos_url = (\n f\"{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/\"\n )\n headers = self.get_headers()\n slugs = []\n response = requests.get(repos_url, headers=headers)\n\n if response.status_code == 200:\n repositories = response.json()[\"values\"]\n for repo in repositories:\n repo_slug = repo[\"slug\"]\n slugs.append(repo_slug)\n return slugs\n\n def load_all_file_paths(self, slug, branch, directory_path=\"\", paths=[]):\n \"\"\"\n Go inside every file that is present in the repository and get the paths for each file\n \"\"\"\n content_url = f\"{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/{slug}/browse/{directory_path}\"\n\n query_params = {\n \"at\": branch,\n }\n headers = self.get_headers()\n response = requests.get(content_url, headers=headers, params=query_params)\n response = response.json()\n if \"errors\" in response:\n raise ValueError(response[\"errors\"])\n children = response[\"children\"]\n for value in children[\"values\"]:\n if value[\"type\"] == \"FILE\":\n if value[\"extension\"] not in self.extensions_to_skip:\n paths.append(\n {\n \"slug\": slug,\n \"path\": f'{directory_path}/{value[\"path\"][\"toString\"]}',\n }\n )\n elif value[\"type\"] == \"DIRECTORY\":\n self.load_all_file_paths(\n slug=slug,\n branch=branch,\n directory_path=f'{directory_path}/{value[\"path\"][\"toString\"]}',\n paths=paths,\n )\n\n def load_text_by_paths(self, slug, file_path, branch) -> List:\n \"\"\"\n Go inside every file that is present in the repository and get the paths for each file\n \"\"\"\n content_url = f\"{self.base_url}/rest/api/latest/projects/{self.project_key}/repos/{slug}/browse{file_path}\"\n\n query_params = {\n \"at\": branch,\n }\n headers = self.get_headers()\n response = requests.get(content_url, headers=headers, params=query_params)\n children = response.json()\n if \"errors\" in children:\n raise ValueError(children[\"errors\"])\n if \"lines\" in children:\n return children[\"lines\"]\n return []\n\n def load_text(self, paths) -> List:\n text_dict = []\n for path in paths:\n lines_list = self.load_text_by_paths(\n slug=path[\"slug\"], file_path=path[\"path\"], branch=self.branch\n )\n concatenated_string = \"\"\n\n for line_dict in lines_list:\n text = line_dict.get(\"text\", \"\")\n concatenated_string = concatenated_string + \" \" + text\n\n text_dict.append(concatenated_string)\n return text_dict\n\n def load_data(self) -> List[Document]:\n \"\"\"Return a list of Document made of each file in Bitbucket.\"\"\"\n slugs = self.get_slugs()\n paths = []\n for slug in slugs:\n self.load_all_file_paths(\n slug=slug, 
branch=self.branch, directory_path=\"\", paths=paths\n )\n texts = self.load_text(paths)\n return [Document(text=text) for text in texts]\n","repo_name":"run-llama/llama-hub","sub_path":"llama_hub/bitbucket/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"en","doc_type":"code","stars":2565,"dataset":"github-code","pt":"46"} +{"seq_id":"73731023820","text":"\"\"\" delete_queue.py: using boto3, search queue list and delete all \"\"\"\nimport os\n\nimport boto3\n\n\nsqs_client = boto3.client(\n \"sqs\",\n aws_access_key_id=os.getenv(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\"),\n)\n\n\nprefix = \"asdf\"\nresponse = sqs_client.list_queues(QueueNamePrefix=prefix, MaxResults=200) # max 1000\n\nqueues = response[\"QueueUrls\"]\n\nfor qq in queues:\n resp = sqs_client.delete_queue(QueueUrl=qq)\n print(f\"XXX: deleted: {qq}\")\n if resp[\"ResponseMetadata\"][\"HTTPStatusCode\"] != 200:\n print(f\"failed: {qq}\")\n","repo_name":"nellaG/snippet","sub_path":"aws/delete_queue.py","file_name":"delete_queue.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"43143695198","text":"from glob import glob\nimport os\nimport os.path\nimport shutil\nimport subprocess\nimport sys\n\nfrom argparse import ArgumentParser\n\ndef pad_number(num):\n length = len(str(num))\n return ((3 - length) * '0') + str(num)\n\nddpost_par = \"\"\"\n'{e_file}' = name of file with E stored\n'{outfile}' = prefix for name of VTR output files\n{e_e2} = IVTR (set to 1 to create VTR output)\n0 = ILINE (set to 1 to evaluate E along a line)\n\"\"\"\n\n\ndef main():\n argparser = ArgumentParser()\n argparser.add_argument('subdir')\n args = argparser.parse_args()\n\n os.chdir(args.subdir)\n subprocess.call('mkdir vtr', shell=True)\n subprocess.call('cp ~/scratch/exec/ddpostprocess .', shell=True)\n\n vtrdir = os.path.join(args.subdir, 'vtr')\n\n efiles = glob('w*r000k000.E1')\n efiles.sort()\n i = 0\n for i, efile in enumerate(efiles):\n with open('ddpostprocess.par', 'w') as fp:\n fp.write(ddpost_par.format(\n e_file=efile,\n outfile='vtr/wav_{}'.format(i),\n e_e2='1'\n ))\n\n subprocess.call('./ddpostprocess', shell=True)\n subprocess.call('rm ddpostprocess.par', shell=True)\n\n unchanged = glob('vtr/*')\n\n for file in unchanged:\n newfile = file.replace('_1.vtr', '.vtr')\n shutil.move(file, newfile)\n\n os.chdir('..')\n shutil.move(vtrdir, '{}_vtr'.format(args.subdir))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"KopelmanLab/Au_Nanosnake_DDA","sub_path":"ddscat-runner/ddpostprocess.py","file_name":"ddpostprocess.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"46322277277","text":"# object-oriented programming\n\nclass PartyAnimal:\n x = 0\n name = \"\"\n\n def __init__(self, z):\n self.name = z\n print(self.name, \" constructed\")\n\n # constructor\n def party(self):\n self.x = self.x + 1\n print(self.name, \" party count \", self.x)\n \n # destructor\n def __del__(self):\n print(\"I am destructed\", self.x)\n\n# create instance and assign it\ns = PartyAnimal(\"Sally\")\ns.party()\n\nj = PartyAnimal(\"John\")\nj.party()\ns.party()\n\n# inheritance\nclass FootballFan(PartyAnimal):\n points = 0\n def touchdown(self):\n self.points = self.points + 7\n self.party()\n print(self.name, \" points \", self.points)\n\nf = 
FootballFan(\"Frank\")\nf.party()\nf.touchdown()","repo_name":"jhnns1/py4e_revision","sub_path":"oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"8226992314","text":"from autopolarizer import AutoPolarizer\n\ndef main():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"degree\", type=int, help=\"polarizer angle [deg]\")\n parser.add_argument(\"-p\", \"--port\", type=str, default=\"/dev/tty.usbserial-FTRWB1RN\", help=\"srial port name\")\n parser.add_argument(\"-r\", \"--reset\", action=\"store_true\", help=\"determines whether to perform a reset\")\n args = parser.parse_args()\n \n #command line arguments\n port = args.port\n deg = args.degree\n is_reset = args.reset\n\n #connect to the polarizer\n polarizer = AutoPolarizer(port=port)\n \n #set speed as default\n polarizer.set_speed()\n \n #reset (if required)\n if is_reset:\n polarizer.reset()\n \n #rotate the polarizer\n polarizer.degree = deg\n \n #explicit disconnect request\n del polarizer\n \nif __name__==\"__main__\":\n main()\n","repo_name":"elerac/hikari","sub_path":"apolarizer.py","file_name":"apolarizer.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"31221217225","text":"from string import ascii_lowercase\nfrom heapq import heappop, heappush\n\ninputFile = open(\"day12\\input.txt\", 'r')\ndata = inputFile.read().strip().split()\n# print(data)\n\ngrid = [list(line) for line in data]\nnumRows = len(grid)\nnumCols = len(grid[0])\n\nfor i in range(numRows):\n for j in range(numCols):\n char = grid[i][j]\n if char == \"S\":\n start = i, j\n if char == \"E\":\n end = i, j\n\n\ndef height(s):\n if s in ascii_lowercase:\n return ascii_lowercase.index(s)\n if s == \"S\":\n return 0\n if s == \"E\":\n return 25\n\n\n# Determine neighbors\ndef neighbors(i, j):\n for di, dj in [[1, 0], [-1, 0], [0, 1], [0, -1]]:\n ii = i + di\n jj = j + dj\n\n # check if neighbors are in grid\n if not (0 <= ii < numRows and 0 <= jj < numCols):\n continue\n \n # check if neighbors' height is reachable\n if height(grid[ii][jj]) <= height(grid[i][j]) + 1:\n yield ii, jj\n\ndef neighbors2(i, j):\n for di, dj in [[1, 0], [-1, 0], [0, 1], [0, -1]]:\n ii = i + di\n jj = j + dj\n\n # check if neighbors are in grid\n if not (0 <= ii < numRows and 0 <= jj < numCols):\n continue\n \n # check if neighbors' height is reachable\n if height(grid[ii][jj]) >= height(grid[i][j]) - 1:\n yield ii, jj\n\n\ndef part1(input):\n # Dijkstra's\n visited = [[False] * numCols for _ in range(numRows)]\n heap = [(0, start[0], start[1])]\n\n while True:\n steps, i, j = heappop(heap)\n\n if visited[i][j]:\n continue\n visited[i][j] = True\n\n if (i, j) == end:\n print(steps)\n break\n\n for ii, jj in neighbors(i, j):\n heappush(heap, (steps + 1, ii, jj))\n\ndef part2(input):\n # since there are multiple start locations, it is more economical to start from the 'end'\n # and terminate at a 'start' point\n visited = [[False] * numCols for _ in range(numRows)]\n heap = [(0, end[0], end[1])]\n\n while True:\n steps, i, j = heappop(heap)\n\n if visited[i][j]:\n continue\n visited[i][j] = True\n\n if height(grid[i][j]) == 0:\n print(steps)\n break\n\n for ii, jj in neighbors2(i, j):\n heappush(heap, (steps + 1, ii, jj))\n\nif __name__ == \"__main__\":\n part1(data)\n 
part2(data)","repo_name":"thereseLYR/adventofcode2022","sub_path":"day12/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"11135474036","text":"import pytest\n\nfrom homeassistant.components.light import DOMAIN as LIGHT_DOMAIN \nfrom homeassistant.const import (\n ATTR_ENTITY_ID,\n SERVICE_TURN_ON,\n SERVICE_TURN_OFF,\n STATE_ON,\n STATE_OFF,\n)\n\nfrom custom_components.react.const import (\n ACTION_CHANGE,\n ACTION_TOGGLE,\n ATTR_PLUGIN_MODULE,\n ATTR_STATE, \n)\nfrom custom_components.react.plugin.const import ATTR_CONFIG\nfrom custom_components.react.plugin.light.const import ATTR_LIGHT_PROVIDER\n\nfrom tests._plugins.light_mock.setup import LIGHT_MOCK_PROVIDER\nfrom tests.common import FIXTURE_WORKFLOW_NAME\nfrom tests.const import (\n ATTR_ENTITY_STATE, \n ATTR_SETUP_MOCK_PROVIDER, \n TEST_CONFIG,\n)\nfrom tests.tst_context import TstContext\n\nFIXTURE_LIGHT_NAME = \"light_name\"\nFIXTURE_VALUE_BEFORE = \"value_before\"\nFIXTURE_VALUE_AFTER = \"value_after\"\nFIXTURE_NAME_INITIAL = \"name_initial\"\nFIXTURE_EXPECTED_SERVICE = \"expected_service\"\nFIXTURE_INITIAL_STATE = \"initial_state\"\nFIXTURE_ENTITY_ID = \"entity_id\"\n\n\ndef set_test_config(test_context: TstContext,\n setup_mock_provider: bool = False,\n light_entity_id: str = None,\n light_entity_state: str = None\n) -> dict:\n result = test_context.hass.data[TEST_CONFIG] = {\n ATTR_SETUP_MOCK_PROVIDER: setup_mock_provider\n }\n if light_entity_id:\n result[ATTR_ENTITY_ID] = light_entity_id\n if light_entity_state != None:\n result[ATTR_ENTITY_STATE] = light_entity_state\n\n\ndef get_mock_plugin(\n light_provider: str = None,\n) -> dict:\n result = {\n ATTR_PLUGIN_MODULE: \"tests._plugins.light_mock\",\n ATTR_CONFIG: {} \n }\n if light_provider:\n result[ATTR_CONFIG][ATTR_LIGHT_PROVIDER] = light_provider\n return result\n\n\n@pytest.mark.parametrize(f\"{FIXTURE_WORKFLOW_NAME},{FIXTURE_LIGHT_NAME}\", [\n (\"light_turn_on_test\", \"initial_off\"),\n (\"light_turn_off_test\", \"initial_on\"),\n (\"light_toggle_test\", \"initial_off\"),\n])\nasync def test_light_plugin_api_set_invalid_entity(test_context: TstContext, workflow_name: str, light_name: str):\n mock_plugin = get_mock_plugin()\n set_test_config(test_context)\n\n await test_context.async_start_react([mock_plugin])\n await test_context.async_send_reaction_event()\n test_context.verify_plugin_data_not_sent()\n test_context.verify_has_log_warning(f\"1 - light.light_{light_name}_test not found\")\n\n\n@pytest.mark.parametrize(f\"{FIXTURE_WORKFLOW_NAME},{FIXTURE_ENTITY_ID}\", [\n (\"light_turn_on_test\", \"light.light_initial_off_test\"),\n (\"light_turn_off_test\", \"light.light_initial_on_test\"),\n (\"light_toggle_test\", \"light.light_initial_off_test\"),\n])\nasync def test_light_plugin_api_invalid_provider(test_context: TstContext, workflow_name: str, entity_id: str):\n invalid_provider = \"invalid\"\n mock_plugin = get_mock_plugin()\n set_test_config(test_context,\n light_entity_id=entity_id,\n light_entity_state=\"test\"\n )\n \n data = {\n ATTR_LIGHT_PROVIDER: invalid_provider\n }\n\n await test_context.async_start_react([mock_plugin])\n await test_context.async_send_reaction_event(data=data)\n test_context.verify_plugin_data_not_sent()\n test_context.verify_has_log_error(f\"1 - Light provider for '{invalid_provider}' not 
found\")\n\n\n@pytest.mark.parametrize(f\"{FIXTURE_WORKFLOW_NAME},{FIXTURE_VALUE_BEFORE},{FIXTURE_VALUE_AFTER},{FIXTURE_NAME_INITIAL}\", [\n (\"light_turn_on_test\", STATE_OFF, STATE_ON, STATE_OFF),\n (\"light_turn_off_test\", STATE_ON, STATE_OFF, STATE_ON),\n (\"light_toggle_test\", STATE_OFF, STATE_ON, STATE_OFF),\n (\"light_toggle_test\", STATE_ON, STATE_OFF, STATE_OFF),\n])\nasync def test_light_plugin_api_set_config_provider(test_context: TstContext, workflow_name: str, value_before: str, value_after: str, name_initial: str): \n entity_id = f\"light.light_initial_{name_initial}_test\"\n mock_plugin = get_mock_plugin(\n light_provider=LIGHT_MOCK_PROVIDER,\n )\n set_test_config(test_context,\n setup_mock_provider=True,\n light_entity_id=entity_id,\n light_entity_state=value_before\n )\n\n await test_context.async_start_react([mock_plugin])\n \n data = {\n ATTR_ENTITY_ID: entity_id,\n ATTR_STATE: value_after,\n }\n\n await test_context.async_send_reaction_event()\n test_context.verify_has_no_log_issues()\n test_context.verify_plugin_data_sent()\n test_context.verify_plugin_data_content(data)\n\n\n@pytest.mark.parametrize(f\"{FIXTURE_WORKFLOW_NAME},{FIXTURE_VALUE_BEFORE},{FIXTURE_VALUE_AFTER},{FIXTURE_NAME_INITIAL}\", [\n (\"light_turn_on_test\", STATE_OFF, STATE_ON, STATE_OFF),\n (\"light_turn_off_test\", STATE_ON, STATE_OFF, STATE_ON),\n (\"light_toggle_test\", STATE_OFF, STATE_ON, STATE_OFF),\n (\"light_toggle_test\", STATE_ON, STATE_OFF, STATE_OFF),\n])\nasync def test_light_plugin_api_set_event_provider(test_context: TstContext, workflow_name: str, value_before: str, value_after: str, name_initial: str): \n entity_id = f\"light.light_initial_{name_initial}_test\"\n mock_plugin = get_mock_plugin()\n set_test_config(test_context,\n setup_mock_provider=True,\n light_entity_id=entity_id,\n light_entity_state=value_before\n )\n\n await test_context.async_start_react([mock_plugin])\n \n data_in = {\n ATTR_LIGHT_PROVIDER: LIGHT_MOCK_PROVIDER \n }\n data_out = {\n ATTR_ENTITY_ID: entity_id,\n ATTR_STATE: value_after,\n }\n\n await test_context.async_send_reaction_event(data=data_in)\n test_context.verify_has_no_log_issues()\n test_context.verify_plugin_data_sent()\n test_context.verify_plugin_data_content(data_out)\n\n\n@pytest.mark.parametrize(F\"{FIXTURE_WORKFLOW_NAME},{FIXTURE_VALUE_BEFORE},{FIXTURE_EXPECTED_SERVICE},{FIXTURE_NAME_INITIAL}\", [\n (\"light_turn_on_test\", STATE_OFF, SERVICE_TURN_ON, STATE_OFF),\n (\"light_turn_off_test\", STATE_ON, SERVICE_TURN_OFF, STATE_ON),\n])\nasync def test_light_plugin_generic_provider_set(test_context: TstContext, workflow_name: str, value_before: str, expected_service: str, name_initial: str): \n entity_id = f\"light.light_initial_{name_initial}_test\"\n mock_plugin = get_mock_plugin()\n set_test_config(test_context,\n light_entity_id=entity_id,\n light_entity_state=value_before\n )\n\n await test_context.async_start_react([mock_plugin])\n \n data = {\n ATTR_ENTITY_ID: entity_id,\n }\n\n await test_context.async_send_reaction_event()\n test_context.verify_has_no_log_issues()\n test_context.verify_service_call_sent()\n test_context.verify_service_call_content(LIGHT_DOMAIN, expected_service, data)\n \n\n@pytest.mark.parametrize(f\"{FIXTURE_WORKFLOW_NAME},{FIXTURE_INITIAL_STATE}\", [\n (\"light_turn_on_skip_test\",STATE_ON),\n (\"light_turn_off_skip_test\", STATE_OFF),\n])\nasync def test_light_plugin_api_skip(test_context: TstContext, workflow_name: str, initial_state: str):\n entity_id = f\"light.light_initial_{initial_state}_test\"\n mock_plugin = 
get_mock_plugin(\n light_provider=LIGHT_MOCK_PROVIDER,\n )\n set_test_config(test_context,\n setup_mock_provider=True,\n light_entity_id=entity_id,\n light_entity_state=initial_state\n )\n\n await test_context.async_start_react([mock_plugin])\n await test_context.async_send_reaction_event()\n test_context.verify_has_no_log_issues()\n test_context.verify_plugin_data_not_sent()\n\n\n@pytest.mark.parametrize(FIXTURE_WORKFLOW_NAME, [\"light_state_test\"])\nasync def test_light_plugin_input_block_state_change(test_context: TstContext):\n entity_id = \"light_state_test\"\n mock_plugin = get_mock_plugin()\n await test_context.async_start_virtual()\n lc = await test_context.async_start_light()\n await test_context.async_start_react([mock_plugin])\n \n async with test_context.async_listen_action_event():\n await lc.async_turn_on(entity_id)\n await test_context.hass.async_block_till_done()\n await test_context.async_verify_action_event_received(expected_count=3)\n test_context.verify_action_event_data(\n expected_entity=entity_id,\n expected_type=LIGHT_DOMAIN,\n expected_action=ACTION_CHANGE,\n event_index=0)\n test_context.verify_action_event_data(\n expected_entity=entity_id,\n expected_type=LIGHT_DOMAIN,\n expected_action=STATE_ON,\n event_index=1)\n test_context.verify_action_event_data(\n expected_entity=entity_id,\n expected_type=LIGHT_DOMAIN,\n expected_action=ACTION_TOGGLE,\n event_index=2)\n test_context.verify_has_no_log_issues()\n await test_context.hass.async_block_till_done()\n","repo_name":"gertjanstulp/ha-react","sub_path":"tests/plugins/test_light_plugin.py","file_name":"test_light_plugin.py","file_ext":"py","file_size_in_byte":8635,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"4305031349","text":"from pikopy.piko import Piko\nimport sqlite3\nimport datetime\nfrom astral import LocationInfo\nfrom astral.sun import sun\nfrom pytz import timezone\nimport os\n\n\ndef fill_with_sun(lat, lon, inverter):\n timezone_local = timezone('Europe/Berlin')\n now = datetime.datetime.now(timezone_local)\n city = LocationInfo(\"St\", \"GER\", timezone_local, lat, lon)\n s = sun(city.observer, date=now, tzinfo=timezone_local)\n\n sunrise = s[\"dawn\"]\n sunset = s[\"dusk\"]\n\n if not sunrise <= now <= sunset:\n return city\n\n fill_db_with_piko(inverter)\n return s\n\n\ndef fill_db_with_piko(i: Piko):\n # if i.get_status() == \"Aus\":\n # return None\n\n dc_1_u = i.get_string1_voltage()\n dc_1_i = i.get_string1_current()\n ac_1_u = i.get_l1_voltage()\n ac_1_p = i.get_l1_power()\n\n dc_2_u = i.get_string2_voltage()\n dc_2_i = i.get_string2_current()\n ac_2_u = i.get_l2_voltage()\n ac_2_p = i.get_l2_power()\n\n dc_3_u = i.get_string3_voltage()\n dc_3_i = i.get_string3_current()\n ac_3_u = i.get_l3_voltage()\n ac_3_p = i.get_l3_power()\n\n current_power = i.get_current_power()\n daily_energy = i.get_daily_energy()\n total_energy = i.get_total_energy()\n status = i.get_status()\n\n PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))\n database = os.path.join(PROJECT_ROOT, \"app.db\")\n\n conn = sqlite3.connect(database)\n c = conn.cursor()\n sql_statement = [(datetime.datetime.now(),\n dc_1_u, dc_1_i,\n ac_1_u, ac_1_p,\n dc_2_u, dc_2_i,\n ac_2_u, ac_2_p,\n dc_3_u, dc_3_i,\n ac_3_u, ac_3_p,\n current_power,\n daily_energy,\n total_energy,\n status\n )]\n\n c.executemany('INSERT INTO pvdata VALUES (null,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)', sql_statement)\n conn.commit()\n c.close()\n\n return None\n\n\nif __name__ == \"__main__\":\n 
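# example wiring: a LAN URL for the Piko inverter plus the site's lat/lon, which gate the DB fill to astral's dawn-to-dusk window\n    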
inverter = Piko(host=\"http://192.168.178.78\")\n    fill_with_sun(52.005552, 6.919066, inverter)\n\n","repo_name":"alexrothm/kostalpvpy","sub_path":"kostal_db_fill.py","file_name":"kostal_db_fill.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"30337823829","text":"def translate(value: float, value_range: (int, int), desired_range: (int, int)):\n    \"\"\"\n    Translate a numeric range to another numeric range\n    :param value: The value to convert\n    :param value_range: The (min, max) of the value\n    :param desired_range: The (min, max) of the translation range\n    :return: Returns the converted value within the desired_range\n    \"\"\"\n    # Figure out how 'wide' each range is\n    value_span = value_range[1] - value_range[0]\n    desired_span = desired_range[1] - desired_range[0]\n\n    # Get the fraction of the current range\n    # (e.g. given value_range=(0, 10) and value = 5, then this will be halfway or 0.5)\n    value_scaled = float(value - value_range[0]) / float(value_span)\n\n    # Figure out what 0.5 means in the other range.\n    # (e.g. given desired_range=(-100, 100), then this would give back 0)\n    return desired_range[0] + (value_scaled * desired_span)\n","repo_name":"DavidA94/2020-SteelHearts","sub_path":"src/python/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"18921375147","text":"#####################################################\r\n#####################################################\r\n##\t\t\t\t\t\t ##\r\n##\t HERE LIES THE MAGNIFICENT CODE WHICH \t ##\r\n## ASSIGNS SARS VARIANTS TO BAM FILES\t ##\r\n##\t WRITTEN ENTIRELY BY MARIA MALLIAROU\t ##\r\n##\t \t\t\t\t\t ##\r\n#####################################################\r\n#####################################################\r\n\r\n#-------------Dependencies--------------------------\r\n\r\n## helper to pip-install a missing package; \"freebayes\", \"java\" and a local snpEff directory must be installed separately\r\ndef install(package):\r\n    subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\r\n\r\n#--------------Imports-------------------------------\r\n\r\nimport subprocess \r\nimport io\r\nimport os\r\nimport sys\r\ntry:\r\n    import pandas as pd\r\nexcept ModuleNotFoundError:\r\n    install(\"pandas\") ### install, then import\r\n    import pandas as pd\r\nimport json\r\ntry:\r\n    from Bio import SeqIO\r\nexcept ModuleNotFoundError:\r\n    install(\"biopython\")\r\n    from Bio import SeqIO\r\nimport re\r\nimport argparse\r\n\r\n\r\n#--------------Functions-----------------------------\r\ndef preprocess_file(bam, ref, path):\r\n    filename = bam.split(\"/\")[-1][0:-4] if len(bam.split(\"/\")) > 1 else bam[0:-4]\r\n    \r\n    os.system(\"freebayes -f {} -F 0.1 --pooled-continuous {} > {}/{}_freebayes.vcf\".format(ref, bam , path, filename))\r\n    os.system(\"sed 's/2019-nCoV/NC_045512.2/g' {}/{}_freebayes.vcf > {}/{}_freebayes_sed.vcf\".format(path, filename, path, filename))\r\n    os.system(\"java -jar snpEff/snpEff.jar ann NC_045512.2 {}/{}_freebayes_sed.vcf > {}/{}_snpEff.vcf\".format(path, filename, path, filename))\r\n    return \"{}/{}_snpEff.vcf\".format(path, filename)\r\n\r\n\r\n\r\ndef f2dict (input_fasta):\r\n\t'''\r\n\tTakes a fasta file and returns a dictionary with the header as key and the sequence as values\r\n\t'''\r\n\tinput_file = open(input_fasta)\r\n\tmy_dict = SeqIO.to_dict(SeqIO.parse(input_file, \"fasta\"))\r\n\treturn my_dict\r\n\r\ndef 
fasta_ref(file):\r\n with open(file, \"r\") as f:\r\n lines = [line.strip(\"\\n\") for line in f]\r\n seq = \"\".join(lines[1:])\r\n return seq\r\n\r\ndef gene_calling(vcf_table):\r\n mutated_genes = []\r\n for i in vcf_table.index:\r\n for gene in pos.index:\r\n if vcf_table.loc[i][\"POS\"] < pos(gene).split(\"-\")[1] and vcf_table.loc[i][\"POS\"] > pos(gene).split(\"-\")[0]:\r\n #print( vcf_table.loc[i][\"INFO\"].split(\"|\")[9])\r\n mutated_genes.append(\"{}:{}\".format(gene, vcf_table.loc[i][\"INFO\"].split(\"|\")[9][2:]) ) \r\n return mutated_genes\r\n\r\ndef read_vcf(path):\r\n\t'''\r\n\twhat do you think it does?It reads a vcf file, dah!\r\n\t'''\r\n\twith open(path, 'r') as f:\r\n\t\tlines = [l for l in f if not l.startswith('##')]\r\n\treturn pd.read_csv(\r\n\t\tio.StringIO(''.join(lines)),\r\n\t\tdtype={'#CHROM': str, 'POS': int, 'ID': str, 'REF': str, 'ALT': str,\r\n\t\t\t 'QUAL': str, 'FILTER': str, 'INFO': str},\r\n\t\tsep='\\t'\r\n\t).rename(columns={'#CHROM': 'CHROM'})\r\n\r\ndef is_aa(aa, aa_table):\r\n\t'''\r\n\treturns if given string is aminoacid\r\n\t'''\r\n\tif aa in aa_table:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\t\t\r\n\r\ndef translate(seq):\r\n \r\n table = {\r\n 'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',\r\n 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',\r\n 'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',\r\n 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R', \r\n 'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',\r\n 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',\r\n 'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',\r\n 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',\r\n 'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',\r\n 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',\r\n 'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',\r\n 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',\r\n 'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',\r\n 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',\r\n 'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_',\r\n 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W',\r\n }\r\n protein =\"\"\r\n if len(seq)%3 == 0:\r\n for i in range(0, len(seq), 3):\r\n codon = seq[i:i + 3]\r\n protein += table[codon] if is_aa(codon , table) else \"_\"\r\n elif len(seq)%3 == 1 :\r\n for i in range(0, len(seq) - 1, 3):\r\n codon = seq[i:i + 3]\r\n protein += table[codon] if is_aa(codon , table) else \"_\"\r\n elif len(seq)%3 == 2 :\r\n for i in range(0, len(seq) - 2, 3):\r\n codon = seq[i:i + 3]\r\n protein += table[codon] if is_aa(codon , table) else \"_\"\r\n return protein\t\r\n\t\r\n\t\r\ndef triplet_position(pos):\r\n\r\n if int(pos)%3 == 0:\r\n return int(int(pos)-2)\r\n elif int(pos)%3 == 1:\r\n return int(pos)\r\n else:\r\n return int(int(pos)-1)\r\n \r\n\r\ndef all_mutations(path): #####################################################3 this needs better files\r\n files = os.listdir(path)\r\n variants = [ file.split(\"_\")[0] for file in files]\r\n os.chdir(path)\r\n print(os.getcwd())\r\n all_mutes = {}\r\n for i in files:\r\n with open(i, \"r\") as f:\r\n lines = [line.strip(\"\\n\") for line in f]\r\n for line in lines:\r\n if line not in all_mutes:\r\n all_mutes[line] = [i.split(\"_\")[0]]\r\n else:\r\n all_mutes[line].append(i.split(\"_\")[0])\r\n\r\n return all_mutes\r\n\r\ndef aa_change(mutation):\r\n '''\r\n filters synonymus mutations\r\n '''\r\n if re.search(pattern=\".+:[A-Z][1-9]+[A-Z]\", string=mutation):\r\n subs = re.findall(r'.+:([\\w])[1-9]+([\\w])', mutation)\r\n if subs[0][0] == subs[0][1]:\r\n return False\r\n else:\r\n return True\r\n return True\r\n \r\n \r\ndef gene_calling(vcf_file):\r\n 
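'''Assign each variant to the SARS-CoV-2 gene whose coordinate range contains its POS, and collect the snpEff HGVS annotation for it.'''\r\n    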
vcf_table = read_vcf(vcf_file)\r\n mutated_genes = []\r\n pos = {\r\n 'ORF1a':'266-13468', 'ORF1b':'13465-21555', 'S':'21563-25384', 'ORF3a':'25393-26220', \r\n 'E':'26245-26472', 'M':'26523-27191', 'ORF6':'27202-27387', 'ORF7a':'27394-27759', \r\n 'ORF7b':'27756-27887', 'N':'28274-29533', 'ORF10':'29558-29674', 'ORF8':'27894-28259'\r\n } \r\n for i in vcf_table.index:\r\n #print(vcf_table.loc[i])\r\n for gene in pos.keys():\r\n if int(vcf_table.loc[i][\"POS\"]) < int(pos[gene].split(\"-\")[1]) and vcf_table.loc[i][\"POS\"] > int(pos[gene].split(\"-\")[0]):\r\n #print( vcf_table.loc[i][\"INFO\"].split(\"|\")[9])\r\n mutated_genes.append(\"{}:{}\".format(gene, vcf_table.loc[i][\"INFO\"].split(\"|\")[9][2:]) ) \r\n \r\n \r\n return mutated_genes\r\n\r\n\r\ndef substitution(mutation, sequence_dict):\r\n site = re.findall(r'(\\d+)([ATGC])>([ATGC])', mutation[1])\r\n gene = mutation[0]\r\n pos = triplet_position(site[0][0])\r\n assert(site[0][1] == sequence_dict[gene][int(site[0][0]) -1])\r\n temp_seq = sequence_dict[gene][0:int(site[0][0])-1] + site[0][2] + sequence_dict[gene][int(site[0][0]): ]\r\n \r\n before = translate(sequence_dict[mutation[0]][pos -1 : pos +3])\r\n after = translate(temp_seq[pos -1 : pos +3])\r\n \r\n\r\n return gene + \":\" +before + str(int((pos+2)/3)) + after\r\n \r\ndef one_deletion(mutation, sequence_dict):\r\n site = re.findall(r'(\\w+)del([ATGC])', mutation[1])\r\n gene = mutation[0]\r\n pos = triplet_position(site[0][0])\r\n assert(site[0][1] == sequence_dict[gene][int(site[0][0]) -1])\r\n temp_seq = sequence_dict[gene][0:int(site[0][0])-1] + sequence_dict[gene][int(site[0][0]): ]\r\n \r\n before = translate(sequence_dict[mutation[0]][pos -1 : pos +3])\r\n after = translate(temp_seq[pos -1 : pos +3])\r\n \r\n return gene + \":\" +before + str(int((pos+2)/3)) + after\r\n #return gene + \":\" +before + str(int((pos+2)/3)) + \"del\")\r\n #return \"ORF shift\"\r\n \r\ndef duplication(mutation, sequence_dict):\r\n site = re.findall(r'(\\w+)dup([ATGC]+)', mutation[1])\r\n gene = mutation[0]\r\n pos = triplet_position(site[0][0])\r\n assert(site[0][1] == sequence_dict[gene][int(site[0][0]) -1])\r\n temp_seq = sequence_dict[gene][0:int(site[0][0])-1] + site[0][1]+ site[0][1] + sequence_dict[gene][int(site[0][0]): ]\r\n before = translate(sequence_dict[mutation[0]][pos -1 : pos +3])\r\n after = translate(temp_seq[pos -1 : pos +3])\r\n return gene + \":\" +before + str(int((pos+2)/3)) + after\r\n #return \"ORF shift\"\r\n \r\ndef insertion(mutation, sequence_dict):\r\n site = re.findall(r'(\\w+)_(\\w+)ins([ATGC]+)', mutation[1])\r\n gene = mutation[0]\r\n pos = triplet_position(site[0][0])\r\n temp_seq = sequence_dict[gene][0:int(site[0][0])] + site[0][2] + sequence_dict[gene][int(site[0][0]): ]\r\n before = translate(sequence_dict[mutation[0]][pos -1 : pos +3])\r\n after = translate(temp_seq[pos -1 : pos +3])\r\n return gene + \":\" +before + str(int((pos+2)/3)) + after\r\n #return gene + \":\" +before + str(int((pos+2)/3)) + \"ORF shift\"\r\n\r\ndef multiple_deletion(mutation, sequence_dict): ###########################this needs fixing \r\n site = re.findall(r'(\\w+)_(\\w+)del([ATGC]+)', mutation[1])\r\n gene = mutation[0]\r\n pos1 = triplet_position(site[0][0])\r\n pos2 = triplet_position(site[0][1])\r\n assert(site[0][2] == sequence_dict[gene][int(site[0][0]) -1 :int(site[0][1])])\r\n #temp_seq = sequence_dict[gene][0:int(site[0][0])-1] + sequence_dict[gene][int(site[0][0]): ]\r\n before = translate(sequence_dict[mutation[0]][pos1 -1 : pos2 +3])\r\n if 
len(site[0][2])% 3 != 0:\r\n        return [gene + \":\" + str(int(pos1/3 +1)) + \"ORF shift due to deletion\"]\r\n    elif pos1 == int(site[0][0]):\r\n        if len(site[0][2]) == 3 :\r\n            return [gene + \":\" +before[0] + str(int((pos1+2)/3)) + \"del\"]\r\n        else:\r\n            return [ gene + \":\" +before[0] + str(int((pos1+2)/3))+ \"_\" + before[-1] + str(int((pos2+2)/3)) + \"del\"]\r\n    else:\r\n        temp_seq = sequence_dict[gene][0:int(site[0][0])-1] + sequence_dict[gene][int(site[0][1]): ]\r\n        if len(site[0][2]) == 3 :\r\n            \r\n            return [ gene + \":\" +before[-1] + str(int((pos2+2)/3)) + translate(temp_seq[pos1-1:pos1+3]), gene + \":\" +before[0] + str(int((pos1+2)/3) )+ \"del\"]\r\n        else:\r\n            return [ gene + \":\" +before[-1] + str(int((pos2+2)/3)) + translate(temp_seq[pos1-1:pos1+3]), gene + \":\" +before[0] + str(int((pos1+2)/3) )+ \"_\" + before[-2] + str(int((pos2+2)/3)-1) + \"del\"]\r\n\r\n\r\n\r\ndef deletion_insertion(mutation, sequence_dict): \r\n    #print(mutation)\r\n    site = re.findall(r'(\\w+)_(\\w+)del([ATGC]+)ins([ATGC]+)', mutation[1])\r\n    gene = mutation[0]\r\n    pos1 = triplet_position(site[0][0])\r\n    pos2 = triplet_position(site[0][1])\r\n    assert(site[0][2] == sequence_dict[gene][int(site[0][0]) -1 :int(site[0][1])])\r\n    to_return = []\r\n    if len(site[0][2]) == len(site[0][3]):\r\n        temp_seq = sequence_dict[gene][0:int(site[0][0])-1] + site[0][3]+sequence_dict[gene][int(site[0][1]): ]\r\n        before = translate(sequence_dict[mutation[0]][pos1 -1 : pos2 +3])\r\n        after = translate(temp_seq[pos1 -1 : pos2 +3])\r\n        aa_pos =list(range( int((pos1+2)/3) , int((pos2+2)/3) +1 ))\r\n        #print(aa_pos)\r\n        for i in range(0,len(aa_pos)):\r\n            to_return.append(gene + \":\" + before[i] + str(aa_pos[i]) + after[i])\r\n        return to_return\r\n    elif len(site[0][2]) != len(site[0][3]) and (len(site[0][3]) - len(site[0][2])) % 3 == 0:\r\n\r\n        temp_seq = sequence_dict[gene][0:int(site[0][0])-1] + site[0][3] +sequence_dict[gene][int(site[0][1]): ]\r\n        shift = int((triplet_position(int(site[0][0]) + len(site[0][3])) + 2 ) /3)\r\n        before = translate(sequence_dict[mutation[0]][pos1 -1 : pos2 +3])\r\n        after = translate(temp_seq[pos1 -1 : pos2 +3])\r\n        aa_pos =list(range( int((pos1+2)/3) , int((pos2+2)/3) +1 ))\r\n        \r\n        aa_subs = [ q1 for q1 in aa_pos if q1<=shift ]\r\n        aa_del = [ q2 for q2 in aa_pos if q2>shift ]\r\n        \r\n        \r\n        for i in range(0,len(aa_subs)):\r\n            #print(gene + \":\" + before[i] + str(aa_pos[i]) + after[i])\r\n            to_return.append(gene + \":\" + before[i] + str(aa_pos[i]) + after[i])\r\n        to_return.append( gene + \":\" + before[len(aa_subs)] + str(aa_del[0]) + \"_\" + before[-1] + str(aa_del[-1]) + \"del\" )\r\n        return to_return\r\n    else:\r\n        return [gene + \":\" + str(int(pos1/3 +1)) + \"ORF shift\"]\r\n    \r\n\r\ndef mut_translator(mutation, sequence_dict):\r\n    mut_list = []\r\n    for i in mutation:\r\n        info = i.split(\":\")\r\n        gene = info[0]\r\n        if re.search(pattern=\"[ATGC]>[ATGC]\", string=info[1]) : \r\n            mut_list.append(substitution(info, sequence_dict))\r\n        elif re.search(pattern=\"[0-9]del[ATGC]$\", string=info[1]):\r\n            mut_list.append(one_deletion(info, sequence_dict))\r\n        elif re.search(pattern=\"[1-9]dup[ATGC]+\", string=info[1]):\r\n            mut_list.append(duplication(info, sequence_dict))\r\n        elif re.search(pattern=\"[0-9]+_[0-9]+ins[ATGC]\", string=info[1]):\r\n            mut_list.append(insertion(info, sequence_dict))\r\n        elif re.search(pattern=\"[0-9]+_[0-9]+del[ATGC]+$\", string=info[1]):\r\n            for d in multiple_deletion(info, sequence_dict):\r\n                mut_list.append(d)\r\n            #print(multiple_deletion(info, sequence_dict))\r\n        elif 
re.search(pattern=\"[0-9]+_[0-9]+del[ATGC]+ins[ATGC]+$\", string=info[1]):\r\n for m in deletion_insertion(info, sequence_dict):\r\n mut_list.append(m)\r\n \r\n else:\r\n pass\r\n final = [q for q in mut_list if aa_change(q)]\r\n return final\r\n \r\n \r\ndef unique_mutations1(sample_muts, all_muts):\r\n unique_mutations = {}\r\n for sm in sample_mutations:\r\n if sm in all_muts and len(all_muts[sm]) == 1 and len(all_muts[sm][0]) > 1 :\r\n unique_mutations[sm] = all_mutas[sm]\r\n return \r\n\r\n\r\ndef get_report(path ,filename, uni, shared, no):\r\n #print(filename, path)\r\n with open(\"{}/{}_report.txt\".format(path, filename), \"w\") as f:\r\n f.write(\"Final report of sample {}\".format(filename) + \"\\n\")\r\n f.write(\"Unique mutations found in this sample:\" + \"\\n\")\r\n for i in uni:\r\n f.write(i+ \":\" + \" \".join(uni[i]) + \"\\n\")\r\n f.write(\"Shared mutations found in this sample:\" + \"\\n\")\r\n for k in shared:\r\n f.write(k + \":\" + \" \".join(shared[k]) + \"\\n\")\r\n f.write(\"The folllowing mutations are not reported in our database\" + \"\\n\")\r\n for l in no:\r\n f.write(l + \"\\n\")\r\n \r\n\r\ndef core_function(vcf, genes, total_mutations, work) :\r\n\r\n muts = gene_calling(vcf) ####### this file in order to be correct needs preproccecing with varCalling from freebayes, change ref name (from 2019-nCoV to NC_045512.2 and genomic mutation indentification from snpEff) \r\n sample_mutations = mut_translator(muts,genes)\r\n unique_mutations = {}\r\n shared_mutations = {}\r\n no_hit = []\r\n for sm in sample_mutations:\r\n if sm in total_mutations and len(total_mutations[sm]) == 1 and len(total_mutations[sm][0]) > 1 :\r\n unique_mutations[sm] = total_mutations[sm]\r\n elif sm in total_mutations and len(total_mutations[sm]) > 1 :\r\n shared_mutations[sm] = total_mutations[sm]\r\n else:\r\n no_hit.append(sm)\r\n sample_name = vcf.split(\"/\")[-1][0:-4] if len(vcf.split(\"/\")) > 1 else vcf[0:-4]\r\n print(work)\r\n os.system(\"mkdir {}/results\".format(work))\r\n get_report( \"{}/results\".format(work) , sample_name ,unique_mutations, shared_mutations, no_hit)\r\n\r\n unique_variants = list(set(sum(unique_mutations.values(), [])))\r\n\r\n if len(unique_variants) == 1 :\r\n u_counter = 0 \r\n s_counter = 0\r\n for mvs in shared_mutations:\r\n if unique_variants[0] in shared_mutations[mvs]:\r\n u_counter += 1\r\n else: ##### Unfortunately BA.2 does not have unique mutations so check if its in the list which the other variants is not\r\n if \"BA.2\" in shared_mutations[mvs]:\r\n s_counter += 1\r\n else:\r\n print(shared_mutations[mvs])\r\n if u_counter == len(shared_mutations) :\r\n print(\"Found unique mutations of Variant {}\".format(unique_variants[0]))\r\n elif u_counter + s_counter == len(shared_mutations) :\r\n print(\"Found unique mutations of Variant {} and BA.2\".format(unique_variants[0])) \r\n else:\r\n u_counter = 0 \r\n s_counter = 0 \r\n for mvs in shared_mutations:\r\n if not set(unique_variants).isdisjoint(set(shared_mutations[mvs])):\r\n u_counter += 1\r\n elif set(unique_variants).isdisjoint(set(shared_mutations[mvs])) and \"BA.2\" in shared_mutations[mvs] :\r\n s_counter += 1\r\n if u_counter == len(shared_mutations) :\r\n print(\"Found unique mutations of Variant {}\".format(\",\".join(unique_variants)))\r\n elif u_counter + s_counter == len(shared_mutations) :\r\n print(\"Found unique mutations of Variant {} and BA.2\".format(\",\".join(unique_variants)))\r\n \r\n\r\n##################### ------------your arguments-----------------------\r\nparser 
= argparse.ArgumentParser(description='Wanna derive SARS-COV-2 variants from your .bam or .vcf file? This is the code for you!', epilog = \"author: Maria Malliarou v1.1\" )\r\n\r\nparser.add_argument('--input_data', type = str, required = True, help = \"Please provide your .vcf / .bam file, or a directory of them for multiple recognition. If you provide .bam input you also need to pass --preprocess\" ) ### takes one or more vcf files\r\n###parser.add_argument('--action') #\r\nparser.add_argument('--preprocess', action = 'store_true', help = \"If set, provide your initial .bam file (already trimmed, sorted and SARS-COV-2 aligned) instead of a vcf\") ###\r\n#parser.add_argument('--bam', type = str, help = \"Please provide your bam file or vcf directory for multiple recognition if you have selected preprocess\") ### takes one or more vcf files\r\nparser.add_argument('--reference', type = str, help = \"Please provide the reference fasta file to use if you choose preprocess\") \r\nparser.add_argument('--alignment_name', nargs = 1, default = \"2019-nCoV\")\r\nparser.add_argument('--report', default = True, type = bool, help = \"The name of the report\" )\r\n\r\nargs = parser.parse_args()\r\n\r\n\r\n######################-----------Parse database files -------------------\r\n\r\ndatabase = all_mutations(\"variant_data\") ###### get all of the mutations reported manually\r\nos.chdir(\"../\")\r\ngene_sequence = { i.split(\":\")[0] : str(f2dict(\"ncbi_dataset/data/gene.fna\")[i].seq) for i in f2dict(\"ncbi_dataset/data/gene.fna\")} ##### get the sequence of every SARS-COV-2 gene, keyed by gene name\r\n\r\n######################-----------Core Code-------------------------------\r\n\r\n\r\n\r\n\r\n\r\n\r\nif os.path.isfile(args.input_data):\r\n    sample = args.input_data.split(\"/\")[-1][0:-4] if len(args.input_data.split(\"/\")) > 1 else args.input_data[0:-4]\r\n    os.system(\"mkdir CoVarCaller_{}_results\".format(sample))\r\n    if args.preprocess:\r\n        os.system(\"mkdir CoVarCaller_{}_results/vcf_files\".format(sample))\r\n        new_vcf = preprocess_file(args.input_data, args.reference, \"CoVarCaller_{}_results/vcf_files\".format(sample))\r\n        core_function(new_vcf, gene_sequence, database, \"CoVarCaller_{}_results\".format(sample) )\r\n    else:\r\n        core_function(args.input_data, gene_sequence, database, \"CoVarCaller_{}_results\".format(sample) )\r\n    \r\nelif os.path.isdir(args.input_data):\r\n    files = os.listdir(args.input_data)\r\n    files = [q for q in files if q.endswith(\".bam\")]\r\n    #print(files)\r\n    os.system(\"mkdir {}all_results\".format(args.input_data))\r\n    path = \"{}all_results\".format(args.input_data)\r\n\r\n    for file in files:\r\n        print(\"Processing file {}\".format(file) )\r\n        sample = file.split(\"/\")[-1][0:-4] if len(file.split(\"/\")) > 1 else file[0:-4]\r\n        os.system(\"mkdir {}/CoVarCaller_{}_results\".format(path ,sample))\r\n        if args.preprocess:\r\n            \r\n            os.system(\"mkdir {}/CoVarCaller_{}_results/vcf_results\".format(path , sample))\r\n            new_vcf = preprocess_file(\"{}/{}\".format(args.input_data,file), args.reference, \"{}/CoVarCaller_{}_results/vcf_results\".format(path , sample))\r\n            print(new_vcf)\r\n            core_function( new_vcf, gene_sequence, database, \"{}/CoVarCaller_{}_results\".format(path,sample) )\r\n\r\n        else:\r\n            core_function(\"{}/{}\".format(args.input_data, file), gene_sequence, database, \"{}/CoVarCaller_{}_results\".format(path,sample) )\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n######\r\n    \r\n\r\n\r\n\r\n\r\n
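# Example invocation (hypothetical paths): python CoVarFinder.py --input_data bam_dir/ --preprocess --reference ref/NC_045512.2.fasta\r\n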
\r\n","repo_name":"marimall/CoVarFinder","sub_path":"CoVarFinder.py","file_name":"CoVarFinder.py","file_ext":"py","file_size_in_byte":20516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"11915431544","text":"# Combine All Results\n#\n#\n# Take each piece of code, run it on a common data set, output individual results, output combined results\n#\n#\nimport fileinput\nimport csv\nimport re\nfrom math import fabs, floor\nfrom datetime import datetime, timedelta\nimport os\nimport sys\nimport time\nfrom operator import itemgetter\n\n\nbyron = []\nwillie =[]\ngreg = []\npeter= []\n\n\n# Import csv files (give options for 1=all_chile 2=no_retweets_chile 3=no_retweets_guatemala 4=no_retweets_tornado)\nuserInput = input(\"What file would you like to use?(1=all_chile 2=no_retweets_chile 3=no_retweets_guatemala 4=no_retweets_tornado): \")\nif userInput == 1:\n\tinputfile = open(\"All_Tweets_chile.csv\")\nelif userInput == 2:\n\tinputfile = open(\"No_Retweets_chile.csv\")\nelif userInput == 3:\n\tinputfile = open(\"No_Retweets_guatemala.csv\")\nelif userInput == 4:\n\tinputfile = open(\"No_Retweets_tornado.csv\")\n\n\n# Change this number to tweak how much a term's popularity must be increasing in order to be added\nbaseline = raw_input(\"Enter a basline (How many times a term is used before it is added)(Default 1000): \")\nif baseline == \"\":\n baseline = 1000\nbaseline = float(baseline)\n\n# Change this number to tweak how much a term's popularity must be increasing in order to be added\nexponent_threshold = raw_input(\"Enter an exponent threshold (By how many times much a number increase in popularity to be added)(Default 10): \")\nif exponent_threshold == \"\":\n exponent_threshold = 10\nexponent_threshold = float(exponent_threshold)\n\n# Change this number to tweak the percentage of occurrence a term must have among tweets in order to \n# be added\nfrequency_threshold = raw_input(\"Enter a frequency threshold (What proportion of tweets must contain the term before it is added)(Default 0.1): \")\nif frequency_threshold == \"\":\n frequency_threshold = 0.1\nfrequency_threshold = float(frequency_threshold)\n\n# Change this number to tweak the time intervals that are compared\ntime_interval = raw_input(\"Enter a time interval in minutes (How long should terms be collected before comparing to the previous interval)(Default 180): \")\nif time_interval == \"\":\n time_interval = 3 * 60 * 60\nelse:\n time_interval = int(time_interval) * 60 \n\n\n# Run Byrons code (allow for variables)\n# Save to a variable/file\nword_count = dict()\ndate_reached = dict()\ndate_last = dict() # Last occurence of word\n\n# Keeps track of the total number of tweets\ntweet_count = 0\nstopwords = 1\n\nwith inputfile as infile:\n\treader = csv.reader(infile, delimiter=\",\")\n\t# Ignore first line of csv\n\tinfile.readline()\n\t# Runs through each line of input csv\n\tfor i,line in enumerate(reader):\n\t\ttweet_count += 1\n\t\ttweet_time = datetime.strptime(line[0].replace(\"+0000 \", \"\"), \"%a %b %d %H:%M:%S %Y\")\n\n\t\t# Remove special characters and date\n\t\ttweet_body = re.sub('[^\\w .]+', '', line[1])\n\n\t\t# Go through each word and increase its frequency count\n\t\tfor word in tweet_body.split():\n\t\t\tword = word.lower()\n\t\t\tif len(word) > 3: # Length greater than 3\n\t\t\t\tif word not in word_count:\n\t\t\t\t\tword_count[word] = [1,tweet_time,tweet_time, 0]\n\t\t\t\telse:\n\t\t\t\t\tword_count[word][0] += 1\n\t\t\t\t\t# Time when trending word 
hits threshold\n\t\t\t\t\tif word_count[word][0] == baseline:\n\t\t\t\t\t\tword_count[word][1] = tweet_time\n\t\t\t\t\t# Last time trending word was used\n\t\t\t\t\tif word_count[word][0] > baseline:\n\t\t\t\t\t\tword_count[word][2] = tweet_time\n\t\t\t\tx = word_count[word][2] - word_count[word][1]\n\t\t\t\ty = floor(fabs(x.total_seconds() / 3600))\n\t\t\t\tif y > 0:\n\t\t\t\t\tz = word_count[word][0] / y\n\t\t\t\tword_count[word][3] = y\n\nsorted_word_count = word_count.items()\nsorted_word_count.sort(key = lambda item: item[1])\nfor word,freq in sorted_word_count:\n\tif freq[0] > baseline:\n\t\t#print \"Word: %s Freq: %i Baseline Time: %s Avg: %s uses per minute\" %(word,freq[0], freq[1], freq[3])\n\t\t#print word\n\t\tbyron.insert(0,word)\n\n\n\n# Run Willie's code (allow for variables)\n# Save to a variable/file\n#!/usr/bin/env python\nif userInput == 1:\n\tinputfile = open(\"All_Tweets_chile.csv\")\nelif userInput == 2:\n\tinputfile = open(\"No_Retweets_chile.csv\")\nelif userInput == 3:\n\tinputfile = open(\"No_Retweets_guatemala.csv\")\nelif userInput == 4:\n\tinputfile = open(\"No_Retweets_tornado.csv\")\n\ncapital_count = dict()\ncity_count = dict()\n\n# Keeps track of city tweets to help with sorting\ntweet_count = 0\n\n# Find whole words within the list of cities\ndef findWholeWord(w):\n return re.compile(r'\\b({0})\\b'.format(w), flags=re.IGNORECASE).search\n\n# List of cites - Thanks Dan!\ncity_list = open(\"cityText.txt\").read().splitlines()\n# \"Stop Words\" to ignore in search - Thanks Greg!\nstop_words = open(\"stop_words/english.stop\").read().splitlines() + open(\"stop_words/spanish.stop\").read().splitlines() + [\"rt\"]\n# \"Stop Cities\" which includes unuseful words listed as cities in our city list\nstop_cities = open(\"stop_words/city.stop\").read().splitlines()\n\n# Input a value to check for. 
I've been testing with 1000\n# userInput = input(\"Please enter a baseline value: \")\n# try:\n# baseline = int(userInput)\n# except ValueError:\n# print(\"That's not an int!\")\n\n#print \"===============================================================================================\"\n\nwith inputfile as infile:\n reader = csv.reader(infile, delimiter=\",\")\n # Ignore first line of csv\n infile.readline()\n for i,line in enumerate(reader):\n tweet_time = datetime.strptime(line[0].replace(\"+0000 \", \"\"), \"%a %b %d %H:%M:%S %Y\")\n # Remove special characters\n tweet_body = re.sub('[^\\w .]+', '', line[1])\n\n # Go through each word and increase it's frequency count\n for word in tweet_body.split():\n # Check if first letter is capitalized and it is a reasonable length\n if len(word) > 2 and word[0].isupper():\n word = word.lower()\n if word not in stop_words:\n if word not in capital_count:\n capital_count[word] = [1, \"\"]\n else:\n capital_count[word][0] += 1\n if capital_count[word][0] == baseline:\n capital_count[word][1] = tweet_time\n if word in city_list and word not in stop_cities:\n city_count[word] = [tweet_count, tweet_time]\n tweet_count = tweet_count+1\n\n# sort the list of capital words by frequency\nsorted_capital_count = capital_count.items()\nsorted_capital_count.sort(key = lambda item: item[1])\n\n# sort cites by when they were added to dict (effectively date once baseline was reached)\ndated_cities = city_count.items()\ndated_cities.sort(key = lambda item: item[1])\n\n# Print everything\n#print \"\\nTop Words that Matched our cities list:\\n \"\n#for word,date in dated_cities:\n #print \"City: %15s Time: %s\" %(word, date[1])\n #willie.insert(0,word)\n\n\n#print \"===============================================================================================\"\n#print \"\\nWords that hit a baseline of %i and the time they reached it:\\n\" %(baseline)\n\nfor word,freq in sorted_capital_count:\n if freq[1] != \"\":\n #print \"Word: %18s Freq: %8i Time: %s\" %(word, freq[0], freq[1])\n #print word\n willie.insert(0,word)\n \nif userInput == 1:\n\tinputfile = open(\"All_Tweets_chile.csv\")\nelif userInput == 2:\n\tinputfile = open(\"No_Retweets_chile.csv\")\nelif userInput == 3:\n\tinputfile = open(\"No_Retweets_guatemala.csv\")\nelif userInput == 4:\n\tinputfile = open(\"No_Retweets_tornado.csv\")\n# Run Greg's code (allow for variables)\n# Save to a variable/file\n#!/usr/bin/env python\n# Key value pairs that keep track of each word and the number of times it occurs\nword_and_freq = dict()\nprevious_word_and_freq = dict()\n\n\n\n# Keeps track of the tweets each time period\ntweet_count = 0\n\n# \"Stop Words\" to ignore in search\nstop_words = open(os.path.dirname(__file__) + \"/stop_words/english.stop\").read().splitlines() + open(os.path.dirname(__file__) + \"/stop_words/spanish.stop\").read().splitlines() + [\"rt\"]\n\n# Key value pair that will carry all the terms that will eventually be added to the search, along with \n# how many times they are reported as increasing\nimportant_terms_to_watch = dict()\n\n# Term and it's total occurrences\nterm_total_occurrences = dict()\nwith inputfile as infile:\n reader = csv.reader(infile, delimiter=\",\")\n # Ignore first line of csv\n infile.readline()\n for i,line in enumerate(reader):\n tweet_count += 1\n tweet_time = datetime.strptime(line[0].replace(\"+0000 \", \"\"), \"%a %b %d %H:%M:%S %Y\")\n # Remove special characters\n tweet_body = re.sub('[^\\w .]+', '', line[1]).lower()\n \n # Grab the intial time of the first 
tweet\n if (i == 0):\n initial_time = tweet_time\n\n # Go through each word and increase it's frequency count\n for word in tweet_body.split():\n word = word.rstrip(\".\")\n # ignore stop words\n if word not in stop_words:\n if word not in word_and_freq:\n word_and_freq[word] = 1\n else:\n word_and_freq[word] += 1\n # keep track of the total occurrences of each word\n if word not in term_total_occurrences:\n term_total_occurrences[word] = 1\n else:\n term_total_occurrences[word] += 1\n \n # After time interval has passed, compare the terms \n if ((tweet_time - initial_time).seconds > time_interval):\n if previous_word_and_freq:\n for word,freq in word_and_freq.items():\n if word in previous_word_and_freq:\n prev_freq = previous_word_and_freq[word]\n # Calculate the rate of occurance for that word for this 10 mins and last 10 mins\n freq_rate = (float(freq) / tweet_count)\n prev_freq_rate = (float(prev_freq) / tweet_count)\n # If the rate of occurance for the word is increasing by a significant amount, add it to the list\n if freq_rate > (prev_freq_rate * exponent_threshold) and freq_rate > frequency_threshold:\n #print \"Frequency of word %15s is increasing. %.1f%% -> %.1f%% occurrence rate\" % (word, prev_freq_rate * 100, freq_rate * 100)\n # Keep track of the words that are increasing in frequency\n if word not in important_terms_to_watch:\n important_terms_to_watch[word] = 1\n else:\n important_terms_to_watch[word] += 1\n # Reset variables for the next time interval\n previous_word_and_freq = word_and_freq\n word_and_freq = dict() \n initial_time = tweet_time\n tweet_count = 0\n\n\n# Sort the list of words\nimportant_terms_to_watch = sorted(important_terms_to_watch.items(), key=itemgetter(1))\n\n# Print out final list\n#print \"\\nTop Terms Increasing in Frequency:\"\n#print \"===============================================================================================\"\nfor word,increases in important_terms_to_watch:\n #print \"%15s: %5d increases. (%7d total occurrences)\" % (word, increases, term_total_occurrences[word])\n #print word\n greg.insert(0,word)\n\n\n# Combine all the resutls and union them\n\noutfile = open(\"Output.txt\", 'w')\nfor i in range (len(byron)):\n count = 1\n if byron[i] in willie:\n count += 1\n if byron [i] in greg:\n count += 1\n if count == 3:\n #print byron[i]\n outfile.write(byron[i])\n outfile.write('\\n')\n\noutfile.close()\n\n\n\n\n\n","repo_name":"peterklipfel/soc_comp_final","sub_path":"Combined_results.py","file_name":"Combined_results.py","file_ext":"py","file_size_in_byte":11494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"30781385941","text":"from urllib import request\nfrom xmlrpc.client import DateTime\nfrom flask import Blueprint, render_template, flash, request\nfrom flask_login import login_required, current_user\nfrom .models import Program\nfrom . 
import db\nfrom flask import jsonify\nimport json\nimport datetime\n\nviews = Blueprint('views', __name__ )\ndef hentDato():\n    current_time = datetime.datetime.now()\n    idag = str(current_time.day) + str(current_time.month)\n    return idag\n\n@views.route('/', methods = ['GET', 'POST'])\n@login_required\ndef home():\n    if request.method == 'POST':\n        name = request.form.get('name')\n        time_start = request.form.get('time_start')\n        time_end = request.form.get('time_end')\n        date = request.form.get('date')\n        \n        \n        if len(name) < 1:\n            flash('name too short', category='error')\n        elif int(time_start) < 0:\n            flash('invalid start time', category='error')\n        elif int(time_end) < 0:\n            flash('invalid end time', category='error')\n        else:\n            new_program = Program(Name=name,user_id=current_user.id, time_start=time_start, time_end=time_end,date=date, votes= 0 )\n            db.session.add(new_program)\n            db.session.commit()\n            flash('Program added', category='success') \n    return render_template(\"home.html\", user = current_user )\n\n@views.route('/delete-program', methods=['POST'])\ndef delete_program():\n    program = json.loads(request.data)\n    programId = program['programId']\n    program = Program.query.get(programId)\n    if program:\n        if program.user_id == current_user.id:\n            db.session.delete(program)\n            db.session.commit()\n    return jsonify({})\n    \n    \n@views.route('/programList', methods=['POST','GET'])\n@login_required\ndef showPrograms():\n    if request.method=='GET':\n        try:\n            args = request.args\n            dato = args.get('date', default=hentDato(), type=int)\n            prog = Program.query.filter_by(date=dato).order_by(Program.votes.desc()).all()\n            return render_template(\"programList.html\", Program = prog, user=current_user, date=dato)\n        except Exception as e:\n            # e holds description of the error\n            error_text = \"<p>The error:<br>\" + str(e) + \"</p>\"\n            hed = '<h1>Something is broken.</h1>'\n            return hed + error_text\n    \n    return render_template(\"programList.html\")\n\n@views.route('/vote-program', methods=['POST'])\ndef vote_program():\n    program = json.loads(request.data)\n    programId = program['programId']\n    program = Program.query.get(programId)\n    if program:\n        program.votes += 1\n        db.session.commit()\n    return jsonify({})","repo_name":"FrodeSanden/Tv-stua_web-app","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"25331507805","text":"# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n# https://leetcode.com/problems/binary-tree-zigzag-level-order-traversal/submissions/\n\nfrom collections import deque\nfrom typing import List\n\nclass Solution:\n    def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n        if not root:\n            return []\n        nodes = deque([(root, 0)])\n        result = []\n        while nodes:\n            current, val = nodes.popleft()\n            result.append((current.val,val))\n            if current.left:\n                nodes.append((current.left, val+1))\n            if current.right:\n                nodes.append((current.right, val+1))\n        i = 0\n        local_result = []\n        global_result = []\n        while i < len(result):\n            current_val = result[i][1]\n            while i < len(result) and result[i][1] == current_val:\n                local_result.append(result[i][0])\n                i += 1\n            if current_val % 2 == 1:\n                global_result.append(local_result[::-1])  # odd levels are reported right-to-left\n            else:\n                global_result.append(local_result)\n            local_result = []\n        return global_result\n    \n    ","repo_name":"memoryonrepeat/algo","sub_path":"leetcode/binarytreezigzagtraversal.py","file_name":"binarytreezigzagtraversal.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"73177039498","text":"import pandas as pd\nimport os\nimport requests\nimport time\n\ndef get_historical_data(stock: str, apikey: str):\n    # Define folder, filename and file path\n    file_name = f'{stock}.csv'\n    data_dir = 'data/hist/'\n    file_path = f'{data_dir}{file_name}'\n    \n    # Check if stock already in data lake\n    if file_name in os.listdir(data_dir):\n        data = pd.read_csv(file_path, index_col=0)\n        return data\n    \n    # If stock not found locally, return API call\n    data = get_historical_data_API(stock, apikey)\n    if 'timestamp' in data.columns:\n        data.to_csv(file_path)\n        data['close'] = data['close'].astype(float)\n        return data\n    else:\n        raise KeyError('Column timestamp not in dataframe!')\n\n\ndef get_historical_data_API(stock: str, apikey: str):\n    API_URL = f\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={stock}&outputsize=full&datatype=csv&apikey={apikey}\"\n    response = requests.get(API_URL)\n    if 'Invalid API call' in response.text:\n        raise KeyError(\"Ticker not valid!\")\n    while '5 calls per minute' in response.text:\n        time.sleep(60)\n        response = requests.get(API_URL)\n    text = [sub.split(\",\") for sub in response.text.split(\"\\r\\n\")]\n    output = pd.DataFrame(text[1:], columns=text[0]).dropna()\n    output['ticker'] = stock\n    output['timestamp'] = output['timestamp'].str.replace('-', '/')\n    output['close'] = output['close'].astype(float) * 100\n    return 
output[100:]\n\n\n\n","repo_name":"ValentinKuhn/Workshop_ETH","sub_path":"frontend/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"41997862859","text":"# -*- coding: utf-8 -*-\r\n\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup \r\nimport csv\r\nimport sys\r\nimport urllib.parse # standard library, no install needed\r\nimport json # https://note.com/masato1230/n/nba86746179ca\r\nimport requests # pip3 install requests\r\nimport time\r\nimport os\r\nimport sqlite3\r\nimport folium\r\n\r\n#http://www.kabu-data.info/all_code/code_tosyo1_code.htm\r\ndef ReadCodeList():\r\n    ret = []\r\n    i = 0\r\n    with open(\"Book1.csv\",encoding='utf-8') as f: # read in the given csv file\r\n        reader = csv.reader(f)\r\n        for line in reader: # read each row\r\n            buf = []\r\n            buf.append(line[0])\r\n            buf.append(line[1])\r\n            ret.append(buf)\r\n    return ret\r\n\r\ndef SearchAddress(CodeNum):\r\n    url = \"https://profile.yahoo.co.jp/fundamental/\"+str(CodeNum)\r\n    html = urllib.request.urlopen(url.replace(\"\\ufeff\", \"\"))\r\n    soup = BeautifulSoup(html, \"html.parser\")\r\n    items = soup.select('div.profile > div > div > table > tr > td > table > tr > td')\r\n    Address = items[5].text\r\n    return Address.split()[1]\r\n\r\ndef MakeMap(Address, companyName):\r\n    s_quote = urllib.parse.quote(Address) # URL-encode the address string\r\n    response = requests.get(makeUrl + s_quote) # GET request to the GSI geocoding API with the encoded address as its argument\r\n    if response.json() == []: # check the returned json; if it is empty\r\n        print(\"[Error] the address could not be resolved\") # report the failure; latitude/longitude stay empty\r\n    elif len(response.json()) >1: # several candidates were returned, so no single one can be chosen; skip\r\n        print(\"[Error] multiple candidates were found for the address \\n please narrow the address down \")\r\n    else: # the returned json holds exactly one candidate\r\n        folium.Marker(location=[response.json()[0][\"geometry\"][\"coordinates\"][1], response.json()[0][\"geometry\"][\"coordinates\"][0]], popup=companyName).add_to(map)\r\n    \r\n    map.save(\"result.html\")\r\n\r\nlocationName = \"\"\r\nmakeUrl = \"https://msearch.gsi.go.jp/address-search/AddressSearch?q=\" \r\nmap = folium.Map(location=[35.681561, 139.767197], zoom_start=8)\r\nif __name__ == \"__main__\":\r\n    List = ReadCodeList()\r\n    for list in List:\r\n        Address = SearchAddress(list[0])\r\n        time.sleep(5) # avoid putting load on the GSI API\r\n        MakeMap(Address,list[1])\r\n        time.sleep(5) # avoid putting load on the GSI API\r\n\r\n","repo_name":"Elsammit/SearchAddressToMapInfo","sub_path":"scraiping.py","file_name":"scraiping.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"ja","doc_type":"code","stars":4,"dataset":"github-code","pt":"46"} +{"seq_id":"2576442881","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport codecs\nimport os\nimport re\nimport sys\n\nfrom setuptools import setup\n\nroot_dir = os.path.abspath(os.path.dirname(__file__))\n\n\ndef get_version(package_name):\n    version_re = re.compile(r\"^__version__ = [\\\"']([\\w_.-]+)[\\\"']$\")\n    package_components = package_name.split('.')\n    init_path = os.path.join(root_dir, *(package_components + ['__init__.py']))\n    with codecs.open(init_path, 'r', 'utf-8') as f:\n        for line in f:\n            match = version_re.match(line[:-1])\n            if match:\n                return match.groups()[0]\n    return '0.1.0'\n\n\nPACKAGE = 'restricted_pkg'\n\n\nsetup(\n    name=\"restricted_pkg\",\n    version=get_version(PACKAGE),\n    author=\"Raphaël Barrois\",\n    author_email=\"raphael.barrois@polytechnique.org\",\n    description=\"A simple setup.py helper for private repositories\",\n    license=\"MIT\",\n    
keywords=['pypi', 'package index', 'private', 'repository'],\n url=\"http://github.com/rbarrois/restricted_pkg\",\n download_url=\"http://pypi.python.org/pypi/restricted_pkg/\",\n packages=[\n 'restricted_pkg',\n ],\n setup_requires=[\n 'setuptools>=0.8',\n ],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 2\",\n ],\n test_suite='tests',\n entry_points={\n 'distutils.setup_keywords': [\n 'private_repository = restricted_pkg.validators:validate_private_repo',\n ],\n },\n)\n","repo_name":"rbarrois/restricted_pkg","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"46"} +{"seq_id":"15243031405","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 18 21:59:49 2018\n\n@author: DELL\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport time\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Activation, Dropout\nfrom keras.layers.recurrent import LSTM\n\na1=np.loadtxt(r'D:\\1806\\DATA FILES\\C111-Full.txt')\na2=np.loadtxt(r'D:\\1806\\DATA FILES\\C110-Full.txt')\n\ntrain = np.empty((160000,100),dtype=\"float32\")\nlabel = np.empty((160000,1),dtype=\"float32\")\ntrain1 = np.empty((160000,100),dtype=\"float32\")\nlabel1 = np.empty((160000,1),dtype=\"float32\")\ntest = np.empty((160000,100),dtype=\"float32\")\ntestlabel = np.empty((160000,1),dtype=\"float32\")\nfor i in range(160000):\n train[i,:] = a1[12000+i:12100+i,-1]\n label[i] = a1[12100+i,2]\n test[i,:] = a1[12000+i:12100+i,-1]\n testlabel[i] = a1[12100+i,2]\n\niii = [ii for ii in range(160000)]\nnp.random.shuffle(iii)\nfor i in range(160000):\n train1[i,:] = train[iii[i],:]\n label1[i] = label[iii[i]]\nX_train = train1\ny_train = label1\nX_test = test\ny_test = testlabel\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n\ndef build_model():\n model = Sequential()\n layers = [1, 50, 100, 1]\n\n model.add(LSTM(\n layers[1],\n input_shape=(None, 1),\n return_sequences=True))\n model.add(Dropout(0.2))\n \n model.add(LSTM(\n layers[2],\n return_sequences=False))\n model.add(Dropout(0.2))\n model.add(Dense(\n layers[3]))\n model.add(Activation(\"linear\"))\n start = time.time()\n model.compile(loss=\"mse\", optimizer=\"rmsprop\")\n print(\"Compilation Time : \", time.time() - start)\n return model\ndef run_network(model=None):\n if model is None:\n model = build_model()\n model.load_weights('test2.h5')\n predicted = model.predict(X_test)\n predicted = np.reshape(predicted, (predicted.size,))\n try:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(y_test[:, 0])\n plt.plot(predicted[:])\n plt.show()\n except Exception as e:\n print(str(e))\n# print('Training duration (s) : ', time.time() - global_start_time)\n return model, y_test, predicted\n\n[mo, y_t, pred] = run_network()","repo_name":"hobolee/1806","sub_path":"lstm/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"43552464978","text":"from 
django.shortcuts import redirect\r\nfrom django.urls import path\r\nfrom .views.AutorView import CrearAutor, ListadoAutor, ActualizarAutor, EliminarAutor\r\nfrom .views.LibroView import ListarLibro, CrearLibro, ActualizarLibro, EliminarLibro\r\n\r\nurlpatterns = [\r\n path(\r\n \"\",\r\n lambda request: redirect(\"listar_autor/\", permanent=True),\r\n name=\"libro-default-page\",\r\n ),\r\n path(\"listar_autor/\", ListadoAutor.as_view(), name=\"listar_autor\"),\r\n path(\"crear_autor/\", CrearAutor.as_view(), name=\"crear_autor\"),\r\n path(\"editar_autor/\", ActualizarAutor.as_view(), name=\"editar_autor\"),\r\n path(\"eliminar_autor/\", EliminarAutor.as_view(), name=\"eliminar_autor\"),\r\n path(\"listar_libro/\", ListarLibro.as_view(), name=\"listar_libro\"),\r\n path(\"crear_libro/\", CrearLibro.as_view(), name=\"crear_libro\"),\r\n path(\"editar_libro/\", ActualizarLibro.as_view(), name=\"editar_libro\"),\r\n path(\"eliminar_libro/\", EliminarLibro.as_view(), name=\"eliminar_libro\"),\r\n]\r\n","repo_name":"BraianAzcune/django-biblioteca","sub_path":"apps/libro/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"18704254489","text":"import pandas as pd\n\ndef enrich_shot_data(project_path='/home/mpare/src/nba_shot_quality'):\n '''\n Function to enrich the shot data with advanced player stats from\n previous season\n\n arguments:\n project_path = path of the project directory\n '''\n # load the shot and player data\n shot_df = pd.read_csv('{}/data/interim/shot_logs.csv'.format(project_path))\n player_df = pd.read_csv('{}/data/interim/player_advanced_data.csv'.format(project_path))\n\n # create a df with offensive features to add\n offense_df = player_df[['Player','TS%']]\n\n # create a df with defensive features to add\n defense_df = player_df[['Player','DWS','DBPM','BLK%']]\n defense_df.columns = ['Player','Defender_DWS','Defender_DBPM','Defender_BLK%']\n\n # merge the shot df with the offense and defense dfs\n full_df = pd.merge(shot_df, offense_df, left_on = 'player_name', right_on = 'Player', how = 'inner')\n full_df.drop('Player', axis = 1, inplace = True)\n full_df = pd.merge(full_df, defense_df, left_on = 'CLOSEST_DEFENDER', right_on = 'Player', how = 'inner')\n full_df.drop('Player', axis = 1, inplace = True)\n\n # keep the desired columns\n cols_to_keep = ['PERIOD','SHOT_CLOCK','DRIBBLES','TOUCH_TIME','SHOT_DIST',\n 'SHOT_RESULT','CLOSE_DEF_DIST','TS%','Defender_DWS','Defender_DBPM',\n 'Defender_BLK%']\n full_df = full_df[cols_to_keep]\n\n full_df.to_csv('{}/data/processed/shot_logs.csv'.format(project_path))\n\n\nif __name__ == '__main__':\n enrich_shot_data()\n","repo_name":"morganpare/nba_shot_quality","sub_path":"src/data/enrich_shot_data.py","file_name":"enrich_shot_data.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"44221898313","text":"import keras.backend\nimport keras.engine\nimport numpy\n\nimport keras_rcnn.backend\nimport keras_rcnn.layers\n\n\nclass ObjectProposal(keras.engine.topology.Layer):\n \"\"\"Propose object-containing regions from anchors\n\n # Arguments\n maximum_proposals: maximum number of regions allowed\n min_size: minimum width/height of proposals\n stride: stride size\n\n # Input shape\n (width of feature map, height of feature map, scale), (None, 4), (None)\n\n # Output shape\n (# images, # proposals, 4)\n 
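\n    # Example\n        # sketch with placeholder tensors; metadata is the [width, height, scale] input described above\n        proposals = ObjectProposal(maximum_proposals=300)([metadata, deltas, scores])\n    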
\"\"\"\n def __init__(self, maximum_proposals=300, min_size=16, stride=16, **kwargs):\n self.maximum_proposals = maximum_proposals\n\n # minimum width/height of proposals in original image size\n self.min_size = min_size\n\n self.stride = stride\n\n super(ObjectProposal, self).__init__(**kwargs)\n\n def build(self, input_shape):\n super(ObjectProposal, self).build(input_shape)\n\n def call(self, inputs, **kwargs):\n \"\"\"\n `image_shape_and_scale` has the shape [width, height, scale]\n \"\"\"\n image_shape_and_scale, deltas, scores = inputs\n\n # the first set of anchors channels are bg probs\n # the second set are the fg probs, which we want\n # scores = scores[:, :, :, 9:]\n\n rr = keras.backend.shape(scores)[1]\n cc = keras.backend.shape(scores)[2]\n\n # TODO: Fix usage of batch index\n batch_index = 0\n\n image_shape = image_shape_and_scale[batch_index, :2]\n image_scale = image_shape_and_scale[batch_index, -1]\n\n # 1. generate proposals from bbox deltas and shifted anchors\n anchors = keras_rcnn.backend.shift([rr, cc], self.stride)\n\n deltas = keras.backend.reshape(deltas, (-1, 4))\n scores = keras.backend.reshape(scores, (-1, 1))\n\n deltas = keras_rcnn.backend.bbox_transform_inv(anchors, deltas)\n\n # 2. clip predicted boxes to image\n proposals = keras_rcnn.backend.clip(deltas, image_shape)\n\n # 3. remove predicted boxes with either height or width < threshold\n # (NOTE: convert min_size to input image scale stored in im_info[2])\n indices = filter_boxes(proposals, self.min_size * image_scale)\n proposals = keras.backend.gather(proposals, indices)\n\n scores = scores[..., (scores.shape[-1] // 2):]\n scores = keras.backend.reshape(scores, (-1, 1))\n scores = keras.backend.gather(scores, indices)\n scores = keras.backend.flatten(scores)\n\n # 4. sort all (proposal, score) pairs by score from highest to lowest\n indices = keras_rcnn.backend.argsort(scores)\n\n # TODO: is this a sensible value? parameterize?\n rpn_pre_nms_top_n = 12000\n\n # 5. take top pre_nms_topN (e.g. 6000)\n if rpn_pre_nms_top_n > 0:\n indices = indices[:rpn_pre_nms_top_n]\n\n proposals = keras.backend.gather(proposals, indices)\n scores = keras.backend.gather(scores, indices)\n\n # 6. apply nms (e.g. threshold = 0.7)\n indices = keras_rcnn.backend.non_maximum_suppression(proposals, scores, self.maximum_proposals, 0.7)\n\n proposals = keras.backend.gather(proposals, indices)\n\n # 8. 
return the top proposals (-> RoIs top)\n return keras.backend.expand_dims(proposals, 0)\n\n def compute_output_shape(self, input_shape):\n return None, self.maximum_proposals, 4\n\n\ndef filter_boxes(proposals, minimum):\n \"\"\"\n Filters proposed RoIs so that all have width and height at least as big as minimum\n\n \"\"\"\n ws = proposals[:, 2] - proposals[:, 0] + 1\n hs = proposals[:, 3] - proposals[:, 1] + 1\n\n indices = keras_rcnn.backend.where((ws >= minimum) & (hs >= minimum))\n\n indices = keras.backend.flatten(indices)\n\n return keras.backend.cast(indices, \"int32\")\n","repo_name":"DemonDamon/tongue_classification_based_on_multi_networks","sub_path":"keras_rcnn/keras_rcnn/layers/object_detection/_object_proposal.py","file_name":"_object_proposal.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"25277815407","text":"import sqlite3\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QWidget, QHBoxLayout, QPushButton, QLineEdit, QTableWidget, QTableWidgetItem, QHeaderView, QSizePolicy, QDialog, QLabel, QDialogButtonBox, QMessageBox, QCheckBox, QFileDialog\nfrom PyQt5.QtGui import QColor, QPixmap, QIcon\nfrom PyQt5.QtCore import Qt\nimport sys\n\n\nfrom AddItemDialog import AddItemDialog\nfrom ViewItemDialog import ViewItemDialog\nimport csv\n\n\n\n\n# Create the SQLite database connection\nconn = sqlite3.connect('database.db')\nc = conn.cursor()\n\nc.execute('''CREATE TABLE IF NOT EXISTS items\n (ID INTEGER PRIMARY KEY AUTOINCREMENT, \n Name TEXT, \n Model TEXT, \n Description TEXT, \n Datasheet TEXT, \n Image TEXT, \n Qty INTEGER, \n Storage_Location TEXT, \n Purchase_Place TEXT, \n Project TEXT, \n Ordered INTEGER, \n Price_Per_Unit INTEGER, \n Used_Part INTEGER)''')\n\napp = QApplication(sys.argv)\n\n\n# Set the application icon\napp_icon_path = \"icon.PNG\"\napp_icon = QIcon(app_icon_path)\napp.setWindowIcon(app_icon)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Component Manager\")\n self.resize(1400, 600)\n \n # Create a central widget and set it as the main window's central widget\n central_widget = QWidget()\n self.setCentralWidget(central_widget)\n\n # Create a horizontal layout manager for the main layout\n main_layout = QHBoxLayout()\n central_widget.setLayout(main_layout)\n\n # Create a widget for the left side\n left_widget = QWidget()\n left_layout = QVBoxLayout()\n left_widget.setLayout(left_layout)\n\n # Create a QTableWidget to display the database entries\n self.table_widget = QTableWidget()\n left_layout.addWidget(self.table_widget)\n\n # Adjust the column widths to stretch and fill the available space\n self.table_widget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n\n # Create a custom widget for the right side\n right_widget = QWidget()\n right_layout = QVBoxLayout()\n right_widget.setLayout(right_layout)\n\n\n\n# Create a QLabel to display the image\n image_label = QLabel()\n pixmap = QPixmap(\"logo.PNG\") # Load the image\n scaled_pixmap = pixmap.scaled(600, 300, Qt.AspectRatioMode.KeepAspectRatio) # Resize the image\n image_label.setPixmap(scaled_pixmap) # Set the image pixmap\n right_layout.addWidget(image_label)\n\n # Create a spacer item to maintain spacing between the buttons\n spacer = QWidget()\n spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n right_layout.addWidget(spacer)\n\n\n # Create a QLineEdit for the text box\n self.text_box = 
QLineEdit()\n self.text_box.textChanged.connect(self.search_items)\n right_layout.addWidget(self.text_box)\n\n\n # Create the refresh button\n refresh_button = QPushButton(\"Refresh\")\n refresh_button.clicked.connect(self.refresh_items)\n right_layout.addWidget(refresh_button)\n\n # Create the export button\n export_button = QPushButton(\"Export to CSV\")\n export_button.clicked.connect(self.export_to_csv)\n right_layout.addWidget(export_button)\n\n button2 = QPushButton(\"Add New Item\")\n button2.clicked.connect(self.show_add_item_dialog)\n right_layout.addWidget(button2)\n\n # Set the stretch factor for the left and right widgets\n main_layout.addWidget(left_widget, 2)\n main_layout.addWidget(right_widget, 1)\n\n self.load_data()\n\n\n def load_data(self, search_term=None):\n self.table_widget.clearContents()\n self.table_widget.setRowCount(0)\n\n query = \"SELECT ID, Name, Model, Qty, Storage_Location, Ordered FROM items\"\n if search_term:\n query += f\" WHERE Name LIKE '%{search_term}%' OR Model LIKE '%{search_term}%' OR Description LIKE '%{search_term}%'\"\n\n \n c.execute(query)\n data = c.fetchall()\n\n self.table_widget.setRowCount(len(data))\n self.table_widget.setColumnCount(6)\n self.table_widget.setHorizontalHeaderLabels(\n [\"ID\", \"Name\", \"Model\", \"Qty\", \"Storage Location\", \"\", \"Ordered\"])\n self.table_widget.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n\n for row, item in enumerate(data):\n for col, value in enumerate(item):\n item_widget = QTableWidgetItem(str(value))\n self.table_widget.setItem(row, col, item_widget)\n\n if col == 3 and item[5] == 1:\n item_widget.setBackground(QColor(\"yellow\"))\n item_widget.setText(f\"{item[3]} (On Order)\")\n\n view_button = QPushButton(\"View\")\n view_button.setStyleSheet(\"background-color: blue; color: white;\")\n view_button.clicked.connect(lambda _, r=row: self.view_item(r))\n self.table_widget.setCellWidget(row, 5, view_button)\n \n\n def show_add_item_dialog(self):\n dialog = AddItemDialog()\n if dialog.exec_() == QDialog.Accepted:\n item_data = dialog.get_item_data()\n self.add_item(*item_data, dialog.datasheet if hasattr(dialog, 'datasheet') else '')\n\n\n\n def add_item(self, name, model, qty, storage_location, price_per_unit, description, project, ordered, purchase_price_input, used_part, place_of_purchase, datasheet=None):\n c.execute(\n \"INSERT INTO items (Name, Model, Qty, Storage_Location, Price_per_unit, Description, Project, Ordered, Price_Per_Unit, Used_Part, Purchase_Place, Datasheet) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n (name, model, qty, storage_location, price_per_unit, description, project, ordered, purchase_price_input, used_part, place_of_purchase, datasheet))\n conn.commit()\n self.load_data()\n\n\n def view_item(self, row):\n item_id_text = self.table_widget.item(row, 0).text()\n if item_id_text:\n try:\n item_id = int(item_id_text)\n except ValueError:\n QMessageBox.warning(self, \"Invalid ID\", \"Invalid ID value.\")\n return\n else:\n QMessageBox.warning(self, \"Invalid ID\", \"Empty ID value.\")\n return\n c.execute(\n \"SELECT ID, Name, Model, Description, Datasheet, Image, Qty, Storage_Location, Purchase_Place, Project, Ordered, Price_Per_Unit, Used_Part FROM items WHERE ID = ?\",\n (item_id,))\n item_info = c.fetchone()\n id, name, model, description, datasheet, image, qty, storage_location, purchase_place, project, ordered, price_per_unit, used_part = item_info if item_info else (\n \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", 
\"\", \"\")\n #print(item_info)\n #print(name)\n\n\n\n dialog = ViewItemDialog(id, name, model, description, datasheet, image, qty, storage_location, purchase_place, project, ordered, price_per_unit, used_part)\n \n dialog.setGeometry(100, 100, 600, 400)\n if dialog.exec_() == QDialog.Accepted:\n self.load_data()\n\n\n def refresh_items(self):\n self.load_data(\"\")\n \n def search_items(self):\n search_term = self.text_box.text()\n self.load_data(search_term)\n\n def export_to_csv(self):\n # Get all the data from the database\n c.execute(\"SELECT * FROM items\")\n data = c.fetchall()\n\n # Get the column names\n column_names = [column[0] for column in c.description]\n\n # Combine column names and data into a list of rows\n rows = [column_names] + data\n\n # Open a file dialog to choose the save location\n file_dialog = QFileDialog()\n file_path, _ = file_dialog.getSaveFileName(self, \"Save as CSV\", \"\", \"CSV Files (*.csv)\")\n if file_path:\n try:\n with open(file_path, \"w\", newline=\"\") as csvfile:\n csv_writer = csv.writer(csvfile)\n # Write the rows to the CSV file\n csv_writer.writerows(rows)\n QMessageBox.information(self, \"Export Successful\", \"Data exported to CSV successfully.\")\n except Exception as e:\n QMessageBox.warning(self, \"Export Failed\", f\"Failed to export data to CSV:\\n{str(e)}\")\n else:\n QMessageBox.warning(self, \"Export Cancelled\", \"Export to CSV cancelled.\")\n\n \n\nc.execute('''CREATE TABLE IF NOT EXISTS items\n (ID INTEGER PRIMARY KEY AUTOINCREMENT, \n Name TEXT, \n Model TEXT, \n Description TEXT, \n Datasheet TEXT, \n Image TEXT, \n Qty INTEGER, \n Storage_Location TEXT, \n Purchase_Place TEXT, \n Project TEXT, \n Ordered INTEGER, \n Price_Per_Unit INTEGER, \n Used_Part INTEGER)''')\n\n\n# Create and show the main window\nwindow = MainWindow()\nwindow.show()\n\n# Start the event loop\nsys.exit(app.exec_())","repo_name":"Quenel/Component-Manager","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28831851625","text":"from discord.ext import commands\nimport discord\n\nimport datetime\nimport aiohttp\nimport feedparser\n\ndef clean_content(text):\n return discord.utils.escape_mentions(discord.utils.escape_markdown(text))\n\nclass rsp_news(commands.Cog):\n class NewsData:\n last_update = None\n rss = None\n site = None\n rss_url = None\n\n def __init__(self, get_time: datetime.datetime, rss: feedparser.util.FeedParserDict, rss_url: str, site: str):\n self.last_update = get_time\n self.rss = rss\n self.site = site\n self.rss_url = rss_url\n\n def __init__(self, bot):\n self.bot = bot\n if hasattr(bot, \"session\") and isinstance(bot.session, aiohttp.client.ClientSession):\n self.session = bot.session\n else:\n self.session = aiohttp.ClientSession()\n\n self.embed_color = 0x36b8fa\n self.rss_page = {\"old\": \"https://www.risupunet.jp/?feed=rss2\", \"new\": \"https://www.risupunet.jp/feed/\", \"beta\": \"https://www.risupunet.jp/feed/\", \"rifupu\": \"https://rifupu.xyz/?feed=rss2\"}\n self.site_url = {\"old\": \"https://www.risupunet.jp/\", \"new\": \"https://www.risupunet.jp/\", \"beta\": \"https://www.risupunet.jp/\", \"rifupu\": \"https://rifupu.xyz/\"}\n self.icon_url = {\"default\": \"https://www.risupunet.jp/favicon.ico\", \"rifupu\": discord.Embed.Empty}\n self.author_text = {\"default\": \"RisuPu News\", \"rifupu\": \"RifuPu\"}\n self.caches = []\n self.cache_expires = 60*60\n self.cache_ignores = 
[415526420115095554]\n\n #get_data\n async def get_data(self, site):\n async with self.bot.session.get(site, proxy=f\"{self.bot.config.PROXY_URL}:{self.bot.config.PROXY_PORT}\") as request:\n return await request.text()\n\n @commands.command(name=\"risupunews\", aliases=[\"rspnews\"], description=\"RisuPuのお知らせ一覧を表示します。\", usage=\"rsp!risupunews | rsp!rspnews \")\n async def risupunews(self, ctx, site: str=\"new\"):\n if site in self.rss_page:\n feeds = [c for c in self.caches if c.site == site]\n if feeds:\n if (feeds[0].last_update+datetime.timedelta(seconds=self.cache_expires) < datetime.datetime.utcnow()) or ctx.author.id in self.cache_ignores:\n async with ctx.channel.typing():\n ret = await self.get_data(self.rss_page[site])\n data = self.NewsData(datetime.datetime.utcnow(), feedparser.parse(ret), self.rss_page[site], site)\n self.caches.remove(feeds[0])\n self.caches.append(data)\n else:\n data = feeds[0]\n else:\n async with ctx.channel.typing():\n ret = await self.get_data(self.rss_page[site])\n data = self.NewsData(datetime.datetime.utcnow(), feedparser.parse(ret), self.rss_page[site], site)\n self.caches.append(data)\n\n feed = data.rss\n\n embed = discord.Embed(description=\"\", color=self.embed_color)\n\n icon_url = self.icon_url.get(site, self.icon_url[\"default\"])\n\n if site in self.site_url:\n embed.set_author(name=self.author_text.get(site, self.author_text.get(\"default\")), icon_url=icon_url, url=self.site_url[site])\n else:\n embed.set_author(name=self.author_text.get(site, self.author_text.get(\"default\")), icon_url=icon_url)\n\n for entry in feed.entries:\n embed.description += f\"[`{clean_content(entry.title)}`]({clean_content(entry.link)})\\n - {(datetime.datetime.strptime(entry.published, '%a, %d %b %Y %H:%M:%S %z')+datetime.timedelta(hours=9)).strftime('%Y %m/%d %H:%M')}\\n\"\n\n embed.set_footer(text=\"最終取得時刻\")\n embed.timestamp = data.last_update\n\n try:\n await ctx.reply(embed=embed)\n except AttributeError:\n await ctx.send(content=f\"{ctx.author.mention} ->\", embed=embed)\n\n else:\n embed = discord.Embed(title=\"エラー\", description=\"**指定されたキーが存在しません。**\", color=0xFF0000)\n try:\n await ctx.reply(embed=embed)\n except AttributeError:\n await ctx.send(content=f\"{ctx.author.mention} ->\", embed=embed)\n\ndef setup(bot):\n bot.add_cog(rsp_news(bot))\n\nif __name__ == \"__main__\":\n print(\"りくりくりーくねっ!\")\n print(\"bot.load_extensionしてね!\")\n","repo_name":"midorichaan/DBot_3","sub_path":"cogs/rsp_news.py","file_name":"rsp_news.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"} +{"seq_id":"26099005162","text":"# coding: utf8\n\n\n# 插入排序\ndef insert_sort(lists):\n # 插入排序 假设第一个元素是排好的序列,第二个元素以后是待插入序列,循环待插入序列\n for i in range(1, len(lists)):\n key = lists[i] # 取待插入序列的每个元素\n j = i - 1\n while j >= 0: # 与排好的序列每个元素比较\n if lists[j] > key:\n lists[j + 1], lists[j] = lists[j], lists[j + 1]\n j -= 1\n return lists\n\n\n# 冒泡排序\ndef short_bubble_sort(a_list):\n exchanges = False\n pass_num = len(a_list) - 1 # 外层循环次数\n while pass_num > 0 and not exchanges:\n exchanges = True\n for i in range(pass_num):\n if a_list[i] > a_list[i + 1]: # 只要有一个元素没排好序,exchanges = False\n exchanges = False\n a_list[i],a_list[i+1] = a_list[i+1], a_list[i]\n pass_num = pass_num - 1\n\n\n# 选择排序, 假设第一个元素是最大的,循环剩余的元素余值比较,大则交换位置,找出最大的元素\n# 外层循环为倒叙,一次是最大的元素需要插入的位置。\ndef selection_sort(a_list):\n for fill_slot in range(len(a_list) - 1, 0, -1):\n pos_of_max = 0\n for location in range(1, fill_slot + 1):\n if 
a_list[location] > a_list[pos_of_max]:\n                pos_of_max = location\n        a_list[fill_slot],a_list[pos_of_max]=a_list[pos_of_max],a_list[fill_slot]\n\n\n# 快速排序,选第一个数作为基准数,小的放在左边,大的放在右边。\ndef qsort(seq):\n    if seq==[]:\n        return []\n    else:\n        pivot=seq[0]\n        lesser=qsort([x for x in seq[1:] if x<pivot])\n        greater=qsort([x for x in seq[1:] if x>=pivot])\n        return qsort(lesser)+[pivot]+qsort(greater)\n\n\n# 二分查找\ndef binarySearch(l, t):\n    low, high = 0, len(l) - 1\n    while low < high:\n        mid = (low + high) // 2\n        if l[mid] > t:\n            high = mid\n        elif l[mid] < t:\n            low = mid + 1\n        else:\n            return mid\n    return low if l[low] == t else False  # 此时 low == high\n\n\n# 栈的实现, 后进先出\nclass Stack:\n    def __init__(self):\n        self.items = []\n    def is_empty(self):\n        return self.items == []\n    def push(self, item):\n        self.items.append(item)\n    def pop(self):\n        return self.items.pop()\n    def peek(self): #查看栈的顶部的对象\n        return self.items[len(self.items)-1]\n    def size(self):\n        return len(self.items)\n\n\n# 队列的实现, 先进先出\nclass Queue:\n    def __init__(self):\n        self.items = []\n    def is_empty(self):\n        return self.items == []\n    def enqueue(self, item):\n        self.items.insert(0,item)\n    def dequeue(self):\n        return self.items.pop()\n    def size(self):\n        return len(self.items)\n\n\n# 二叉树的实现\nclass BinaryTree:\n    def __init__(self, root):\n        self.key = root\n        self.left_child = None\n        self.right_child = None\n\n    def insert_left(self, new_node):\n        if self.left_child == None:\n            self.left_child = BinaryTree(new_node)\n        else:\n            t = BinaryTree(new_node)\n            t.left_child = self.left_child\n            self.left_child = t\n\n    def insert_right(self, new_node):\n        if self.right_child == None:\n            self.right_child = BinaryTree(new_node)\n        else:\n            t = BinaryTree(new_node)\n            t.right_child = self.right_child\n            self.right_child = t\n\n# 堆是一种完全二叉树,堆排序是一种树形选择排序,利用了大顶堆堆顶元素最大的特点,不断取出最大元素,并调整使剩下的元素还是大顶堆,依次取出最大元素就是排好序的列表。\n\n\ndef recur_fibo(n):\n    \"\"\"\n    递归函数\n    输出斐波那契数列\n    \"\"\"\n    if n <= 1:\n        return n\n    else:\n        return recur_fibo(n-1) + recur_fibo(n-2)\n\n\n# 汉诺塔递归(压栈出栈)\ndef move(n, a, buffer, c):\n    if n == 1:\n        print(a, \"->\", c)\n        return\n    move(n-1, a, c, buffer)  # 把n-1个盘子由 a 移动到 b 借助 C buffer 表示 b\n    move(1, a, buffer, c)  # 把最后一个盘子由 a 移动到 C\n    move(n-1, buffer, a, c)  # 把n-1个盘子由 b 移动到 c 借助 a\n\n\n# 合并两个有序列表\n# 递归\ndef _recursion_merge_sort2(l1, l2, tmp):\n    if len(l1) == 0 or len(l2) == 0:\n        tmp.extend(l1)\n        tmp.extend(l2)\n        return tmp\n    else:\n        if l1[0] < l2[0]:\n            tmp.append(l1[0])\n            del l1[0]\n        else:\n            tmp.append(l2[0])\n            del l2[0]\n        return _recursion_merge_sort2(l1, l2, tmp)\n\n\n# 循环算法\ndef loop_merge_sort(l1, l2):\n    tmp = []\n    while len(l1) > 0 and len(l2) > 0:\n        if l1[0] < l2[0]:\n            tmp.append(l1[0])\n            del l1[0]\n        else:\n            tmp.append(l2[0])\n            del l2[0]\n    tmp.extend(l1)\n    tmp.extend(l2)\n    return tmp\n","repo_name":"Zhaoyu1123/notes","sub_path":"python-notes/sort_algorithm.py","file_name":"sort_algorithm.py","file_ext":"py","file_size_in_byte":5029,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"29694698719","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models.functions import Lower\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, render\n\nfrom artists.models import Artist\nfrom base.views import spotify_login\n\n\ndef get_source_all_artists(*, user, user_id, mine):\n    if user_id:\n        user = get_object_or_404(get_user_model(), pk=user_id)\n        return user.library.artists.all(), user\n    if mine:\n        return user.library.artists.all(), user\n    return Artist.objects.all(), 
None\n\n\n@login_required\ndef all_artists(request, user_id=None, mine=False):\n source, displaying_user = get_source_all_artists(\n user=request.user, user_id=user_id, mine=mine\n )\n artists = source.order_by(Lower('name'))\n return render(\n request,\n 'all_artists.html',\n {'artists': artists, 'displaying_user': displaying_user},\n )\n\n\n@login_required\ndef single_artist(request, artist_id):\n artist = get_object_or_404(Artist, spotify_id=artist_id)\n tracks = request.user.library.tracks.filter(artists=artist).order_by(Lower('name'))\n return render(\n request, 'single_artist.html', {'name': artist.name, 'tracks': tracks}\n )\n","repo_name":"derekjamerson/my_spotify_tool","sub_path":"artists/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"69989769420","text":"import io\nimport numpy as np\nfrom contextlib import redirect_stdout, redirect_stderr\nimport time\n\nfrom ..base import *\nfrom .base import BasePDReactiveODE\nfrom scipy.integrate import solve_ivp, BDF, OdeSolution\nfrom scipy.optimize import OptimizeResult as OdeResult\n\nSCIPY_AVAIL = True\nMESSAGES = {\n 0: \"The solver successfully reached the end of the integration interval.\",\n 1: \"A termination event occurred.\",\n 2: \"The solver failed to converge.\"\n}\n\nclass ScipyPDReactiveODE(BasePDReactiveODE):\n\n def solve(self,T,p,mi0,Cik0,end,**kwargs):\n\n max_steps = kwargs.pop(\"max_steps\", np.inf)\n\n super().set_initial_params(T,p,mi0,Cik0,**kwargs)\n\n method = kwargs.get('method', 'BDF')\n rtol = kwargs.get('rtol', 1.e-5)\n atol = kwargs.get('atol', 1.e-9)\n\n u0 = np.concatenate((mi0,Cik0))\n\n #try:\n #so = io.StringIO()\n #se = io.StringIO()\n #with redirect_stdout(so), redirect_stderr(se):\n tic = time.perf_counter()\n if method == 'BDF_mcm':\n t0 = 0.\n solver = BDF(self.rhs, t0, u0, float(end), rtol=rtol, atol=atol, jac=self.jac)\n ts = [t0]\n ys = [u0]\n interpolants = []\n status = None\n num_steps = 0\n while status is None:\n message = solver.step()\n \n if solver.status == 'finished':\n status = 0\n elif solver.status == 'failed':\n status = -1\n break\n\n if num_steps == max_steps:\n status = 2\n \n t_old = solver.t_old\n t = solver.t\n y = solver.y\n output = solver.dense_output()\n interpolants.append(output)\n ts.append(t)\n ys.append(y)\n num_steps = num_steps + 1\n \n message = MESSAGES.get(status, message)\n ts = np.array(ts)\n ys = np.vstack(ys).T\n ode_solution = OdeSolution(ts, interpolants)\n sol = OdeResult(t=ts, y=ys, sol=ode_solution,\n nfev=solver.nfev, njev=solver.njev, nlu=solver.nlu,\n status=status, message=message, success=status >= 0)\n else:\n sol = solve_ivp(self.rhs, [0,end], u0, dense_output=True, method=method, rtol=rtol, atol=atol, jac=self.jac)\n \n self.sol = sol\n toc = time.perf_counter()\n self.stime = toc-tic\n #self.stdout = so.getvalue()\n #self.stderr = se.getvalue()\n #flag = self.rxn.check_coder_error()\n #if flag!=0:\n # self.sol = None\n # self.excstr = repr(flag)\n # self.rxn.reset_coder_error()\n # except Exception as e:\n # self.sol = None\n # self.excstr = repr(e)\n\n","repo_name":"mitchellmcm27/tcg-ec","sub_path":"systems/ec/python/tcg_slb/phasediagram/scipy.py","file_name":"scipy.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"17227027393","text":"# coding:utf-8\n# version:python3.5.1\n# author:kyh\n# import geodata 
csv files to cloud database\n\nimport psycopg2\n\n\nclass CloudDatabase(object):\n # Init database and input ip\n # 初始化数据库并传入ip\n def __init__(self, database, user, password, ip=\"127.0.0.1\", port=\"5432\"):\n self.database = database\n self.user = user\n self.password = password\n self.ip = ip\n self.port = port\n\n # Connect database and set input as host, return connection and cursor\n # 连接ip端数据库并返回connection和cursor\n def db_connect(self):\n self.connection = psycopg2.connect(database=self.database, user=self.user,\n password=self.password, host=self.ip, port=self.port)\n self.cursor = self.connection.cursor()\n\n # Write log file\n # 输出日志\n def write_log(self, e):\n self.connection.rollback()\n with open(\"log.txt\", 'a') as log_file:\n log_file.writelines(str(e))\n\n # Copy files to database\n # 将csv文件导入数据库\n def import_csv2db(self, filepath, start, end):\n for i in range(start, end):\n print(i)\n try:\n # 打开文件\n file = open(\"{0}\\\\face{1}.txt\".format(filepath, i), 'r')\n line = file.readline()\n # 起始的SQL语句\n sql_command = \"INSERT INTO face{0} VALUES (\".format(i)\n # 计数\n count = 1\n while line:\n # 构造SQL语句\n line = line.replace('\\t', ',')\n sql_command += \"{0}),(\".format(line.split('\\n')[0])\n # 如果是10000的倍数则提交\n if count % 10000 == 0:\n print(\"{0},{1}\".format(i, count))\n sql_command = sql_command[:-2]\n # 提交数据\n self.cursor.execute(sql_command)\n self.connection.commit()\n # 构造新的SQL语句\n sql_command = \"INSERT INTO face{0} VALUES (\".format(i)\n count += 1\n # 读取下一条数据\n line = file.readline()\n # 全部跑完则将剩余的数据提交\n sql_command = sql_command[:-2]\n self.cursor.execute(sql_command)\n self.connection.commit()\n except Exception as e:\n self.write_log(e)\n\n # Create tables\n # 创建表\n def create_table(self, start, end):\n for i in range(start, end):\n try:\n sql_command = '''\n CREATE TABLE face{0}\n (id BIGINT NOT NULL,\n userid TEXT,\n photo_date_taken DATE,\n photo_date_uploaded BIGINT,\n title TEXT DEFAULT NULL,\n description TEXT DEFAULT NULL,\n user_tags TEXT DEFAULT NULL,\n longitude FLOAT DEFAULT 0,\n latitude FLOAT DEFAULT 0,\n accuracy INTEGER DEFAULT 0,\n download_url TEXT NOT NULL,\n facenum INTEGER,\n happiness FLOAT,\n neutral FLOAT,\n sadness FLOAT,\n disgust FLOAT,\n anger FLOAT,\n fear FLOAT,\n surprise FLOAT,\n facequality_s FLOAT,\n facequality_v FLOAT,\n smile_s FLOAT,\n smile_v FLOAT,\n gender INTEGER,\n ethnicity INTEGER,\n age INTEGER,\n faceid serial PRIMARY KEY \n );\n CREATE INDEX iface_id{0} ON face{0}(id);\n CREATE INDEX iface_date{0} ON face{0}(photo_date_taken);\n '''.format(i)\n self.cursor.execute(sql_command)\n self.connection.commit()\n except Exception as e:\n self.write_log(e)\n\n\nif __name__ == '__main__':\n try:\n # 连接数据库\n # database = CloudDatabase(\"Face\", \"postgres\", \"postgres\", \"47.89.209.207\")\n # 连接本地数据库\n database = CloudDatabase(\"Face\", \"postgres\", \"postgres\", \"127.0.0.1\")\n database.db_connect()\n # 创建照片表\n # start指的是从第几个数据库表开始end表示的是第几个数据库表结束\n start =1\n end = 135\n database.create_table(start, end)\n # 导入csv数据\n database.import_csv2db(r\"D:\\Users\\KYH\\Desktop\\EmotionMap\\FlickrEmotionData\\5face_format\", start, end)\n except Exception as e:\n with open(\"log.txt\", 'a') as log_file:\n log_file.writelines(str(e))\n","repo_name":"FeitengLab/EmotionMap","sub_path":"0All/CollectData/import_to_database.py","file_name":"import_to_database.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} 
+{"seq_id":"43281985478","text":"\"\"\"\nJava world\n\"\"\"\n\nimport os\nimport sys\nimport logging\n\n# Module JPype1\nimport jpype\nfrom jpype import java\nfrom jpype import javax\n\nimport lib_util\nimport lib_common\n\n\nglob_java_jvm = None\n\n\n# It is possible to return a similar object, but on a remote machine.\ndef JPypeLocalStartJVM():\n global glob_java_jvm\n if glob_java_jvm:\n return glob_java_jvm\n\n try:\n if lib_util.isPlatformLinux:\n glob_java_jvm = _jpype_local_start_jvm_linux()\n\n elif lib_util.isPlatformWindows:\n glob_java_jvm = _jpype_local_start_jvm_windows()\n else:\n lib_common.ErrorMessageHtml(\"Unknown operating system\")\n\n except Exception as exc:\n lib_common.ErrorMessageHtml(\"JavaJmxSystemProperties caught:\" + str(exc))\n\n return glob_java_jvm\n\n\ndef _jpype_local_start_jvm_linux():\n # Example: '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.91-2.b14.fc22.x86_64/jre/lib/amd64/server/libjvm.so'\n dflt_path = jpype.getDefaultJVMPath()\n\n # getDefaultJVMPath=C:\\Program Files\\Java\\jre1.8.0_121\\bin\\server\\jvm.dll\n logging.debug(\"dflt_path=%s\", dflt_path)\n\n # Now extracts the version, which will be used for the JDK directionary.\n base_dflt_jvm = os.path.dirname(dflt_path)\n base_jre_relative = os.path.join( base_dflt_jvm, \"..\", \"..\")\n\n base_jre_abs = os.path.abspath(base_jre_relative)\n # base_jre_abs=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.91-2.b14.fc22.x86_64/jre/lib\n logging.debug(\"base_jre_abs=%s\", base_jre_abs)\n\n java_dir_prefix = os.path.join(base_jre_abs, \"../..\")\n\n # We need to open tools.jar which is in /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.91-2.b14.fc22.x86_64/lib/tools.jar\n # jpype.startJVM(dflt_path,\"-Djava.class.path=/usr/lib ... /tools.jar\")\n jpype.startJVM(dflt_path, \"-Djava.class.path=\" + java_dir_prefix + \"/lib/tools.jar\")\n\n #jvPck = jpype.JPackage('sun').tools.attach.WindowsVirtualMachine\n virtual_machine = jpype.JPackage('com').sun.tools.attach.VirtualMachine\n\n return virtual_machine\n\n\ndef _jpype_local_start_jvm_windows():\n # u'C:\\\\Program Files\\\\Java\\\\jre1.8.0_121\\\\bin\\\\server\\\\jvm.dll'\n dflt_path = jpype.getDefaultJVMPath()\n\n # getDefaultJVMPath=C:\\Program Files\\Java\\jre1.8.0_121\\bin\\server\\jvm.dll\n logging.debug(\"getDefaultJVMPath=%s\", dflt_path)\n\n # Now extracts the version, which will be used for the JDK directionary.\n base_dflt_jvm = os.path.dirname(dflt_path)\n base_jre_relative = os.path.join(base_dflt_jvm, \"..\", \"..\")\n\n base_jre_abs = os.path.abspath(base_jre_relative)\n # base_jre_abs=C:\\Program Files\\Java\\jre1.8.0_121\n logging.debug(\"base_jre_abs=%s\", base_jre_abs)\n\n dir_jre = os.path.basename(base_jre_abs)\n # dir_jre=jre1.8.0_121\n logging.debug(\"dir_jre=%s\", dir_jre)\n\n str_jre = dir_jre[:3]\n if str_jre != \"jre\":\n # Our assumption on the directory syntax is wrong.\n logging.debug(\"Invalid str_jre=%s\", str_jre)\n return None\n\n base_java = os.path.dirname(base_jre_abs)\n dir_jdk = \"jdk\" + dir_jre[3:]\n\n java_dir_prefix = os.path.join(base_java, dir_jdk)\n # java_dir_prefix=C:\\Program Files\\Java\\jdk1.8.0_121\n logging.debug(\"java_dir_prefix=%s\", java_dir_prefix)\n\n os_path = os.environ[\"PATH\"]\n\n # java_dir_prefix = \"C:\\\\Program Files\\\\Java\\\\jdk1.8.0_121\"\n\n # \"attach.dll\" is not in the jre.\n path_attach_dll = java_dir_prefix + \"\\\\jre\\\\bin\"\n\n path_original = os.environ[\"PATH\"]\n\n os.environ[\"PATH\"] = os_path + \";\" + path_attach_dll\n\n # We need to open tools.jar which is in C:\\Program 
Files\\Java\\jdk1.8.0_121\\lib\n # jpype.startJVM(dflt_path,attachPath,\"-Djava.class.path=C:/Program Files/Java/jdk1.8.0_121/lib/tools.jar\")\n # jpype.startJVM(dflt_path,\"-Djava.class.path=C:/Program Files/Java/jdk1.8.0_121/lib/tools.jar\")\n jpype.startJVM(dflt_path, \"-Djava.class.path=\" + java_dir_prefix + \"\\\\lib\\\\tools.jar\")\n\n #jvPck = jpype.JPackage('sun').tools.attach.WindowsVirtualMachine\n virtual_machine = jpype.JPackage('com').sun.tools.attach.VirtualMachine\n\n os.environ[\"PATH\"] = path_original\n\n return virtual_machine\n\n\ndef _java_jmx_pid_mbeans_attach(pid, jv_pck_vm, mbean_obj_nam=None):\n \"\"\"\n Attaching to a process is riskier, so we do not do it when listing all Java processes.\n This procedure needs to attache and might fail sometimes.\n \"\"\"\n CONNECTOR_ADDRESS = \"com.sun.management.jmxremote.localConnectorAddress\"\n\n dict_result = {}\n\n logging.debug(\"Attaching to pid=%s type=%s\", pid, type(pid))\n # jpype._jexception.AttachNotSupportedExceptionPyRaisable:\n # com.sun.tools.attach.AttachNotSupportedException:\n # Unable to attach to 32-bit process running under WOW64\n #\n # This exception is caught with pytest and many tests.\n # It works fine with few tests or with PyCharm.\n try:\n virt_mach = jv_pck_vm.attach(str(pid))\n except Exception as exc:\n logging.warning(\"Exception:%s\", str(exc))\n return dict_result\n\n logging.debug(\"Attached to pid=%s\", pid)\n connector_address = virt_mach.getAgentProperties().getProperty(CONNECTOR_ADDRESS)\n\n if not connector_address:\n # fileSeparator = \"\\\\\"\n # agent=C:\\Program Files\\Java\\jre1.8.0_121\\lib\\management-agent.jar\n # agent = virt_mach.getSystemProperties().getProperty(\"java.home\") + fileSeparator + \"lib\" + fileSeparator + \"management-agent.jar\"\n\n agent = os.path.join(virt_mach.getSystemProperties().getProperty(\"java.home\"), \"lib\", \"management-agent.jar\")\n\n logging.debug(\"agent=%s\", str(agent))\n virt_mach.loadAgent(agent)\n # agent is started, get the connector address\n connector_address = virt_mach.getAgentProperties().getProperty(CONNECTOR_ADDRESS)\n\n dict_result[\"connector\"] = connector_address\n\n # \"service:jmx:rmi://127.0.0.1/stub/rO0ABXN9AAAAAQ...\"\n\n jmx_url = javax.management.remote.JMXServiceURL(connector_address)\n jmx_soc = javax.management.remote.JMXConnectorFactory.connect(jmx_url)\n # This interface represents a way to talk to an MBean server, whether local or remote.\n # The MBeanServer interface, representing a local MBean server, extends this interface.\n connect_m_bean = jmx_soc.getMBeanServerConnection()\n\n # connect_m_bean=['addNotificationListener', 'class', 'createMBean', 'defaultDomain',\n # 'delegationSubject', 'domains', 'equals', 'getAttribute', 'getAttributes', 'getClass',\n # 'getDefaultDomain', 'getDomains', 'getMBeanCount', 'getMBeanInfo', 'getObjectInstance',\n # 'hashCode', 'invoke', 'isInstanceOf', 'isRegistered', 'mBeanCount', 'notify', 'notifyAll',\n # 'queryMBeans', 'queryNames', 'removeNotificationListener', 'setAttribute', 'setAttributes',\n # 'this$0', 'toString', 'unregisterMBean', 'wait']\n\n # mbeanObjNam = \"com.sun.management:type=HotSpotDiagnostic\"\n if mbean_obj_nam:\n logging.debug(\"mbeanObjNam=%s\", mbean_obj_nam)\n jvx_obj_nam = javax.management.ObjectName(mbean_obj_nam)\n else:\n jvx_obj_nam = None\n\n # jpype._jexception.MalformedObjectNameExceptionPyRaisable: javax.management.MalformedObjectNameException: Key properties cannot be empty\n all_mbeans = connect_m_bean.queryMBeans(jvx_obj_nam, None)\n\n 
# all_mbeans=[sun.management.OperatingSystemImpl[java.lang:type=OperatingSystem], sun.management.MemoryManagerImpl[java.\n logging.debug(\"all_mbeans=%s\", str(all_mbeans))\n\n vect_mbeans = []\n\n # Gets as much information as possible about this MBean.\n for elt_mbean in all_mbeans:\n mbean_object_name = elt_mbean.getObjectName()\n one_mbean = {\n \"className\": elt_mbean.getClassName(),\n \"objectName\": str(mbean_object_name)\n }\n\n # TODO: To save time, we could do that only if mbeanObjNam is not None.\n one_mbean_info = connect_m_bean.getMBeanInfo(mbean_object_name)\n\n descr_mbean_info = one_mbean_info.getDescriptor()\n dict_mbean_info_descr = {}\n for key_mbean_info in descr_mbean_info.getFieldNames():\n val_m_bean_info = descr_mbean_info.getFieldValue(key_mbean_info)\n dict_mbean_info_descr[key_mbean_info] = val_m_bean_info\n one_mbean[\"info\"] = dict_mbean_info_descr\n\n for attr in one_mbean_info.getAttributes():\n logging.debug(\"attr=%s\", str(attr))\n logging.debug(\"attr.getName()=%s\", attr.getName())\n logging.debug(\"attr.getType()=%s\", attr.getType())\n logging.debug(\"attr.getDescription()=%s\", attr.getDescription())\n\n attrs_mbean_info = one_mbean_info.getAttributes()\n dict_mbean_info = {}\n for one_attr in attrs_mbean_info:\n key_attr = one_attr.getName()\n # int=\\\n get_tp = one_attr.getType()\n try:\n get_attr = connect_m_bean.getAttribute(mbean_object_name, key_attr)\n # Without a concatenation, it prints \"1\" instead of boolean True.\n val_attr = str(get_attr) + \" (%s)\" % get_tp\n except:\n val_attr = \"N/A\"\n dict_mbean_info[key_attr] = val_attr\n one_mbean[\"attrs\"] = dict_mbean_info\n\n vect_mbeans.append(one_mbean)\n\n dict_result[\"all_mbeans\"] = vect_mbeans\n\n # When detaching, all the intermediary objects created by connect_m_bean are deleted.\n # This is why their content must be stored.\n virt_mach.detach()\n\n return dict_result\n\n# https://www.jtips.info/index.php?title=JMX/Remote\n\n\ndef JavaJmxSystemProperties(pid):\n jv_pck_vm = JPypeLocalStartJVM()\n\n try:\n virt_mach = jv_pck_vm.attach(str(pid))\n except Exception as exc:\n vm_sys_props = {\n \"jv_pck_vm\": str(jv_pck_vm),\n \"JMX error\": str(exc),\n \"Pid\": str(pid)}\n return vm_sys_props\n\n try:\n gsp = virt_mach.getSystemProperties()\n vm_sys_props = {}\n\n for k in gsp:\n v = gsp[k]\n vm_sys_props[k] = v\n\n # TODO: Frequent error:\n #\n # (,\n # RuntimeError('No matching overloads found.\n # at native\\common\\jp_method.cpp:117',),\n # \\\n\n virt_mach.detach()\n except Exception as exc:\n vm_sys_props = {\n \"VM\": str(virt_mach),\n \"JMX error\": str(exc),\n \"Pid\": str(pid)}\n\n # Shutdown the VM at the end\n _quiet_shutdown()\n return vm_sys_props\n\n\ndef JPypeListVMs(jv_pck_vm):\n \"\"\"\n This returns a list of processes without attaching to them,\n so it is simpler and faster.\n The result is a map indexed by pids.\n \"\"\"\n resu_procs = dict()\n if not jv_pck_vm:\n return resu_procs\n\n list_vms = jv_pck_vm.list()\n\n logging.debug(\"VirtualMachine.dir=%s\", str(dir(list_vms)))\n for one_vm in list_vms:\n dic_by_props = dict()\n logging.debug(\"%s\", one_vm)\n logging.debug(\"%s\", str(dir(one_vm)))\n logging.debug(\"id=%s\", str(one_vm.id()))\n logging.debug(\"displayName=%s\", str(one_vm.displayName()))\n logging.debug(\"getClass=%s\", str(one_vm.getClass()))\n logging.debug(\"provider=%s\", str(one_vm.provider()))\n logging.debug(\"isAttachable=%s\", str(one_vm.isAttachable()))\n logging.debug(\"toString=%s\", str(one_vm.toString()))\n # 
JavaJmxPidMBeansAttach(one_vm.id(),jvPckVM)\n\n dic_by_props[\"class\"] = one_vm.getClass()\n dic_by_props[\"provider\"] = one_vm.provider()\n dic_by_props[\"isAttachable\"] = one_vm.isAttachable()\n\n # sun.tools.attach.WindowsAttachProvider@3f99bd52: 8084 sun.tools.jconsole.JConsole\n dic_by_props[\"toString\"] = one_vm.toString()\n\n # Same as \"toString\"\n # dic_by_props[\"str\"] = str(one_vm)\n\n resu_procs[one_vm.id()] = dic_by_props\n\n return resu_procs\n\n\n# This fails on Linux.\n# Better not stopping it because there might be several calls.\n# On Windows, better reusing the same JVM.\ndef _quiet_shutdown():\n return\n # Must redirect the Java output\n # Shutdown the VM at the end\n if not lib_util.isPlatformLinux:\n jpype.shutdownJVM()\n\n\n# TODO: This could work on a remote machine if we have the Java RMI port number and user/pass.\ndef ListJavaProcesses():\n jv_pck_vm = JPypeLocalStartJVM()\n\n list_vms = JPypeListVMs(jv_pck_vm)\n\n # Shutdown the VM at the end\n _quiet_shutdown()\n\n return list_vms\n\n\n# TODO: This could work on a remote machine if we have the Java RMI port number and user/pass.\n# If mbeanObjNam is None, returns data for all MBeans.\ndef GetJavaDataFromJmx(the_pid, mbean_obj_nam=None):\n jv_pck_vm = JPypeLocalStartJVM()\n\n java_results = _java_jmx_pid_mbeans_attach(the_pid, jv_pck_vm, mbean_obj_nam)\n\n # Some extra data to add ??\n # jvValDict = jv_pck_vm[thePid]\n # for jvKey in jv_pck_vm:\n\n # Shutdown the VM at the end\n _quiet_shutdown()\n\n return java_results\n\n\n# Development notes:\n#\n# https://stackoverflow.com/questions/10331189/how-to-find-the-default-jmx-port-number\n# C:\\Users\\xxyyzz>jvisualvm\n# The launcher has determined that the parent process has a console and will reuse it for its own console output.\n# Closing the console will result in termination of the running program.\n# Use '--console suppress' to suppress console output.\n# Use '--console new' to create a separate console window.\n#\n#\n# # Start this command on both machines. Notepad is a simple app. 
Security disabled.\n# java -Dcom.sun.management.jmxremote \\\n# -Dcom.sun.management.jmxremote.port=9010 \\\n# -Dcom.sun.management.jmxremote.local.only=false \\\n# -Dcom.sun.management.jmxremote.authenticate=false \\\n# -Dcom.sun.management.jmxremote.ssl=false -jar Notepad.jar\n#\n# jconsole usable on Windows (192.168.0.14) and Linux (192.168.0.17)\n# Start it in remote mode with port 9010.\n#\n# Problem: How can we have the list of remote machines running on a remote host?\n# Do they all have a distinct port number ?\n# Can we share this port number ?\n#\n# https://www.optiv.com/blog/exploiting-jmx-rmi\n#\n#\n# Credentials would be like: \"JMI\" : { \"192.168.0.14:9010\" : ( \"user\", \"pass\" ) }\n#\n#\n","repo_name":"rchateauneu/survol","sub_path":"survol/sources_types/java/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":14091,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"46"} +{"seq_id":"827022745","text":"import torch\nimport torchvision\nimport torch.nn as nn\nfrom model import simple_CNN\n\n################################### Dataset #######################################\n\nmnist_train = torchvision.datasets.MNIST('.',train=True, download=True)\nx_train_dataset = mnist_train.train_data\ny_train_dataset = mnist_train.train_labels\n\ntrain_dataset = torch.utils.data.TensorDataset(x_train_dataset, y_train_dataset)\ntrain_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)\n\nmnist_test = torchvision.datasets.MNIST('.',train=False)\nx_test_dataset = mnist_test.test_data\ny_test_dataset = mnist_test.test_labels\n\ntest_dataset = torch.utils.data.TensorDataset(x_test_dataset, y_test_dataset)\ntest_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=True)\n\n################################### Model #######################################\nmodel = simple_CNN.simpleCNN()\nmodel.train()\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\nfor epoch in range(10):\n loss_num = 0.0\n for i, (data, label) in enumerate(train_loader):\n data = data.unsqueeze(dim=1).float()\n pred = model(data)\n loss = criterion(pred, label)\n loss_num += loss\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n print(loss_num / len(train_loader))","repo_name":"LeeChanHyuk/Machine_Learning_Practice","sub_path":"simpleCNN_training.py","file_name":"simpleCNN_training.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"19554536502","text":"import numpy as np\nfrom sklearn.datasets import make_sparse_spd_matrix\nfrom scipy.spatial import distance\n\ndef permutation_topics(B_true, B_sampled):\n B_urn = B_sampled.copy()\n k, V = B_true.shape\n B_urn[:, k+1] = range(k)\n permutation = []\n for i in range(k):\n distances = []\n for j in range(k-i):\n distances.append(np.linalg.norm(B_true[i] - B_urn[j]))\n min_index = np.argmin(distances)\n permutation.append(B_urn[min_index, k+1])\n B_urn = np.delete(B_urn, min_index, 0)\n return permutation\n\n\ndef _fake_log(*args, **kwargs):\n return\n\n\ndef graph_loss(G_true, G_sampled, permutation, debug=False):\n log = print if debug else _fake_log # turn log off/on\n G_perm = G_sampled.copy()\n log(\"BEFORE FOR\")\n for i in range(G_true.shape[0]): # Number of rows\n p = int(permutation[i])\n G_perm[i] = G_sampled[p]\n log(\"G_PERM\")\n log(G_perm)\n G_perm[:,i] = G_sampled[:,p]\n 
log(\"G_SAMPLED\")\n log(G_sampled)\n log(\"NEW RETURN\")\n log(abs(G_true-G_perm).sum())\n return abs(G_true-G_perm).sum()\n\n","repo_name":"fpjaa/BAY-STATS-Project-ABDPPS","sub_path":"graph_loss.py","file_name":"graph_loss.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"43249086706","text":"import json\nimport os\n\nfrom faker import Faker\nfrom locust import HttpLocust, TaskSet, task\n\n\nclass APIClientBehaviour(TaskSet):\n\n def __int__(self, parent):\n self.base_url = \"/api/v1\"\n self.charset = \"utf-8\"\n self.mimetype = \"application/json\"\n super(APIClientBehaviour, self).__int__(parent)\n\n def on_start(self):\n # Get user login token\n self.token = self.login_user()\n self.fake = Faker()\n\n def login_user(self):\n # login user\n data = self.client.post(\n '/api/v1/auth/login/',\n data=json.dumps({\n \"email\": \"samkaris75@gmail.com\",\n \"password\": \"TestPass1#\"}),\n headers={'Content-Type': 'application/json'},\n name='login'\n )\n return json.loads(data._content)['user_data']['access_token']\n\n @task(1)\n def index(self):\n # Test index route\n self.client.get('/', name='index')\n\n @task(2)\n def _login(self):\n # Test login route\n self.login_user()\n\n @task(2)\n def book_flight(self):\n data = {\n \"departure_date\": self.fake.date(),\n \"seat_number\": \"BW12\"\n }\n self.client.post(\n '/api/v1/flights/b7oio10at/tickets/',\n data=json.dumps(data),\n headers={\n 'Content-Type': 'application/json',\n 'Authorization': f'Bearer {self.token}'\n },\n name='Book ticket'\n )\n\n\nclass WebsiteUser(HttpLocust):\n host = os.getenv('LOCUST_HOST')\n task_set = APIClientBehaviour\n min_wait = 5000\n max_wait = 15000\n","repo_name":"sam-karis/airtech","sub_path":"locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"12889469786","text":"class Solution:\n def maxArea(self, height: List[int]) -> int:\n '''area=0\n current=0\n if len(height)==0:\n return 0\n elif len(height)==1:\n return height[0]\n elif len(height)==2:\n return min(height)\n #keep on adding until the next element is greater than the current\n else:\n for i in range(len(height[1:])):\n if height[current]>=height[i]:\n area+=height[current]\n else:\n current+=1\n return area\n ''' \n area=0\n i=0\n j=len(height)-1\n while(iheight[j]:\n j-=1\n else:\n i+=1\n return area\n","repo_name":"SaloniGandhi/leetcode","sub_path":"containerwithmostwater.py","file_name":"containerwithmostwater.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"37693628764","text":"from collections import *\n\n\"\"\"\n 1st: hashtable\n\n Time O(N)\n Space O(N)\n 119 ms, faster than 23.53%\n\"\"\"\n\n\nclass Solution:\n def mostFrequent(self, nums: List[int], key: int) -> int:\n cntr = Counter()\n for i in range(len(nums)-1):\n cur = nums[i]\n nxt = nums[i+1]\n if cur == key:\n cntr[nxt] += 1\n k, v = cntr.most_common(1)[0]\n return k\n","repo_name":"parasiitism/AlgoDaily","sub_path":"leetcode/2190-most-frequent-number-following-key-in-an-array/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"37888804410","text":"import pandas as pd \ndict={'x':[1,2,3,4],\n 
'v':[5,6,7,8]}\n\n# print dict['x'][1]\n\n\ndf=pd.DataFrame(dict)\n# print (df)\n\n# print(df.shape)\nrows,cols=df.shape\n# returns a tuple\n# print(rows)\n\n","repo_name":"dniboghgnis/New_additions","sub_path":"t01.py","file_name":"t01.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"70112977099","text":"from __future__ import absolute_import, division, print_function\nimport os, sys\nimport libtbx.load_env\nfrom libtbx import easy_run\nfrom libtbx.test_utils import open_tmp_file, open_tmp_directory\n\ndef run(args):\n tmp_dir = open_tmp_directory(suffix=\"example_cif_parser\")\n cur_dir = os.path.abspath(os.path.curdir)\n os.chdir(os.path.abspath(tmp_dir))\n try:\n exercise_compilation()\n finally:\n os.chdir(cur_dir)\n\ndef exercise_compilation():\n ucif_dist = libtbx.env.dist_path(module_name=\"ucif\")\n antlr3_dist = libtbx.env.under_dist(\"ucif\", \"antlr3\")\n os.environ[\"LIBTBX_UCIF\"] = ucif_dist\n os.environ[\"LIBTBX_ANTLR3\"] = antlr3_dist\n assert ucif_dist.find('\"') < 0\n if sys.platform == \"win32\":\n cmd = '\"%s/examples/build_cif_parser.bat\"' %ucif_dist\n ext = \".exe\"\n else:\n cmd = '\"%s/examples/build_cif_parser.sh\"' %ucif_dist\n ext = \"\"\n result = easy_run.fully_buffered(cmd)\n if result.return_code:\n if len(result.stderr_lines) > 0:\n raise RuntimeError(result.show_stderr())\n raise RuntimeError(result.show_stdout())\n assert os.path.exists(\"cif_parser\"+ext)\n f = open_tmp_file(suffix=\".cif\")\n f.write(cif_string)\n f.close()\n cmd = 'cif_parser \"%s\"' %f.name\n cmd = os.path.join(\".\", cmd)\n r = easy_run.fully_buffered(cmd).raise_if_errors()\n assert r.stdout_lines[0].startswith(\"Congratulations!\")\n\ncif_string = \"\"\"\\\ndata_a\n_a 1\n_b 2\n\"\"\"\n\nif __name__ == '__main__':\n run(sys.argv[1:])\n print(\"OK\")\n","repo_name":"cctbx/cctbx_project","sub_path":"iotbx/cif/tests/tst_ucif_examples_compilation.py","file_name":"tst_ucif_examples_compilation.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"46"} +{"seq_id":"10890748592","text":"from marshmallow import fields, Schema\n\n\nclass LogFileGroupsSchema(Schema):\n starting_date = fields.Date(\n required=False,\n description='Only return log files from this starting date'\n )\n modified_after = fields.Integer(\n required=False,\n description='Only return log files modified after this timestamp'\n )\n\n\nclass LogFileDetails(Schema):\n last_modified = fields.Integer(\n description='Timestamp of when log was last modified'\n )\n screen_identifier = fields.String(\n description='Screen identifier'\n )\n dnqualifier = fields.String(\n description='Dnqualifier of the screen server the log file comes from'\n )\n\n\nclass LogFileGroupsResponseSchema(Schema):\n data = fields.Dict(\n keys=fields.Date(),\n values=fields.Dict(\n keys=fields.UUID(),\n values=fields.Nested(LogFileDetails),\n description='Dictionary where keys are log file UUIDs and values are partial log file details.' # noqa\n ),\n description='Dictionary where keys are Dates and values are dictionaries.' 
# noqa\n )\n\n\nclass LogFileRawSchema(Schema):\n log_file_uuids = fields.List(\n fields.UUID(),\n description='Filter logs by uuid.'\n )\n dates = fields.List(\n fields.Date(),\n description='Filter logs by date.'\n )\n screen_identifiers = fields.List(\n fields.String(),\n description='Filter logs by screen_identifier.'\n )\n dnqualifiers = fields.List(\n fields.String(),\n description='Filter logs by dnqualifiers.'\n )\n modified_after = fields.Integer(\n required=False,\n description='Only return log files modified after this timestamp'\n )\n\n\nclass LogFileRawDetails(Schema):\n uuid = fields.UUID(description='Primary identifier of a log file.')\n created = fields.Integer()\n dnqualifier = fields.String(\n description='Dnqualifier of the screen server.'\n )\n error_message = fields.String(\n description='Error occured in collection or parsing phase.'\n )\n signed = fields.Boolean()\n screen_identifier = fields.String(description='Screen identifier e.g. S01')\n absolute_file_path = fields.String(description='Absolute file path')\n unencrypted = fields.Boolean()\n device_ip_address = fields.String(description='Screen server IP address.')\n last_modified = fields.Integer()\n repull_marked = fields.Boolean()\n date = fields.String(description='Log file date.')\n serial = fields.String(description='Screen server serial number.')\n parse_attempted = fields.Boolean()\n pulled = fields.Boolean()\n parsed = fields.Boolean()\n pull_attempted = fields.Boolean()\n no_playouts = fields.Boolean()\n xml = fields.String(description='Full .xml file as retrieved from server.')\n\n\nclass LogFileRawResponseSchema(Schema):\n data = fields.Dict(\n keys=fields.UUID(description='Primary Identifier of the log file.'),\n values=fields.Nested(LogFileRawDetails),\n description='Dictionary where keys are log file UUIDs and values are log files details.' 
# noqa\n )\n","repo_name":"mcanetti/screenwriter_api_requirements","sub_path":"serv/api/core/logging/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"39339171708","text":"import streamlit as st\nimport pickle\nimport numpy as np\nimport pandas as pd\n\ntypes = ['Boating', 'Invalid', 'Provoked', 'Sea Disaster', 'Unprovoked']\nactivities = ['Fishing', 'Other', 'Scuba diving', 'Snorkeling',\n 'Spearfishing', 'Standing', 'Surfing', 'Swimming', 'Wading', 'boarding']\nsex = ['F', 'M']\nspecies = ['angel shark', 'blacktip reef', 'blacktip shark', 'blue shark',\n 'bonita sharkk', 'bull shark', 'dogfish shark', 'foot shark',\n 'galapagos shark', 'hammerhead shark', 'juvenile shark', 'juvenile tiger',\n 'lb dog', 'lb reef', 'lb sand', 'lemon shark', 'mako shark', 'nurse shark',\n 'reef shark', 'sand shark', 'sandbar shark', 'sandshark', 'sandtiger shark',\n 'sevengill shark', 'shark species', 'shark with', 'sharks', 'spinner shark',\n 'thresher', 'thresher shark', 'tiger shark', 'unidentified', 'unknown',\n 'white shark']\n\nmodel = pickle.load(open(\"rf.pkl\", \"rb\"))\n\nst.title(\"Shark Attack Prediction\")\na1 = st.selectbox(\"Types\", types)\na2 = st.selectbox(\"Activity\", activities)\na3 = st.selectbox(\"Gender\", sex)\na4 = st.number_input(\"Age\")\na5 = st.selectbox(\"Species\", species)\n\nif st.button(\"Predict\"):\n a1 = types.index(a1)\n a2 = activities.index(a2)\n a3 = sex.index(a3)\n a5 = species.index(a5)\n test = np.array([[a1, a2, a3, a4, a5]])\n res = model.predict(test)\n print(res)\n st.success(\"Is Attacked: \" + str(res[0]))\n","repo_name":"thealper2/Shark-Attack-Prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28283005395","text":"\n\ndef ftp8_all(m=4,n=4):\n expo = [i for i in range(2**m)]\n logd = [i for i in range(2**n)]\n\n ftps = []\n for e in expo:\n for d in logd:\n t = [ 2**(-i-1) * (1 if d & (2**b) else 0) for i, b in enumerate(range(n-1, -1, -1))]\n ftp = 2**(-e) * (1+sum(t))\n if ftp > 1:\n print(f'info: e={e}, d={d}')\n ftps.append(ftp)\n\n return sorted(ftps)\n\n\ndef main():\n ftps = ftp8_all()\n for f in ftps:\n print('{:14.15f}'.format(f))\n print(f'Total = {len(ftps)}')\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"bohanxyz/amath584","sub_path":"ftp8.py","file_name":"ftp8.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"35057138247","text":"\"\"\"Util functions for evaluating models.\"\"\"\n\nimport os\nimport csv\nimport json\nimport torch\nfrom tqdm import tqdm\nimport numpy as np\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.metrics import RocCurveDisplay\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import classification_report\nimport matplotlib.pyplot as plt\n\nfrom data.dataset import eval_folder\nfrom models.conv_net import simpleConvNet\nfrom utils.mil_utils import mil_model_wrapper\n\n\ndef load_model_clf(MODEL_WEIGHTS_PATH, device):\n \"\"\"\n Saved weights are for utils.mil_utils.mil_model_wrapper\n class objects. 
Here we load them into a suitable class object\n    and then recover the model.model classification model inside.\n    \"\"\"\n    model = simpleConvNet()\n    model = mil_model_wrapper(model.to(device))\n    model.load_state_dict(torch.load(MODEL_WEIGHTS_PATH, map_location = device))\n    return model.model\n\n\nclass get_batch_video_probabs(nn.Module):\n    \"\"\"Get output scores and labels of a batch of videos.\"\"\"\n    def __init__(self, device, max_video_frames):\n        super().__init__()\n        self.device = device\n        self.max_video_frames = max_video_frames\n\n    def forward(self, outputs, labels, len_frames):\n        frames_mask = torch.arange(self.max_video_frames, device = self.device)[None, :] < len_frames\n        output_masked = outputs * frames_mask.int().float()\n        video_probab_scores = torch.amax(output_masked, dim = 1)\n\n        return video_probab_scores.tolist(), labels[:, 0].tolist()\n\n\ndef get_all_video_probabs(model, dataloader, device, max_video_frames):\n    \"\"\"Get output scores and labels of all videos in the dataloader using batch evaluation.\"\"\"\n\n    model.eval()\n    all_paths = []\n    all_labels = []\n    all_scores = []\n    criterion = get_batch_video_probabs(device, max_video_frames)\n\n    with tqdm(dataloader, unit = \"batch\", leave = True) as tqdm_progressbar:\n        for batch in tqdm_progressbar:\n            inputs, labels, vidlens, vidpaths = batch['X'], batch['Y'], batch['len'], batch['path']\n\n            tqdm_progressbar.set_description(f\"Getting video predictions\")\n\n            inputs, labels, vidlens = inputs.to(device), labels.to(device), vidlens.to(device)\n            inputs, labels, vidlens = inputs.to(torch.float32), labels.to(torch.float32), vidlens.to(torch.float32)\n\n            outputs = model(inputs).to(torch.float32)\n            batch_preds, batch_labels = criterion(outputs, labels, vidlens)\n\n            all_paths.extend(vidpaths)\n            all_scores.extend(batch_preds)\n            all_labels.extend(batch_labels) # ADDED\n\n    return all_scores, all_labels, all_paths\n\n\ndef get_optimal_threshold(model, val_path, transform, max_video_frames, batch_size, device):\n    \"\"\"Returns optimal threshold based on (TPR - FPR) score on validation set.\"\"\"\n\n    # get dataset, dataloader\n    dataset = eval_folder(val_path, transform, max_video_frames)\n    dataloader = DataLoader(dataset = dataset, batch_size = batch_size, shuffle = True)\n\n    # criteria\n    scores, labels, paths = get_all_video_probabs(model, dataloader, device, max_video_frames)\n\n    # account for (basket) undetected videos (these will not come through the dataloader)\n    augment_scores, augment_labels, augment_paths = dataset.get_basket_undetected_videos()\n    scores, labels, paths = scores + augment_scores, labels + augment_labels, paths + augment_paths\n\n    # decide optimal threshold (TPR - FPR)\n    fpr, tpr, thresholds = roc_curve(labels, scores)\n    optimal_idx = np.argmax(tpr - fpr)\n    optimal_threshold = thresholds[optimal_idx]\n\n    return optimal_threshold\n\n\ndef print_classification_metrics(\n    model, test_path, transform, max_video_frames, batch_size, device, save_results_dir = None, threshold = None, val_path = None\n):\n    \"\"\"Prints and saves classification metrics - ROC curve, AUROC score, predictions as csv file etc.\"\"\"\n\n    # get dataset, dataloader\n    dataset = eval_folder(test_path, transform, max_video_frames)\n    dataloader = DataLoader(dataset = dataset, batch_size = batch_size, shuffle = True)\n\n    # criteria\n    scores, labels, paths = get_all_video_probabs(model, dataloader, device, max_video_frames)\n\n    # account for (basket) undetected videos (these will not come through the dataloader)\n    augment_scores, augment_labels, augment_paths = dataset.get_basket_undetected_videos()\n    scores, 
\ndef print_classification_metrics(\n model, test_path, transform, max_video_frames, batch_size, device, save_results_dir = None, threshold = None, val_path = None\n):\n \"\"\"Prints and saves classification metrics - ROC curve, AUROC score, predictions as csv file etc.\"\"\"\n\n # get dataset, dataloader\n dataset = eval_folder(test_path, transform, max_video_frames)\n dataloader = DataLoader(dataset = dataset, batch_size = batch_size, shuffle = True)\n\n # criteria\n scores, labels, paths = get_all_video_probabs(model, dataloader, device, max_video_frames)\n\n # account for (basket) undetected videos (these never reach the dataloader)\n augment_scores, augment_labels, augment_paths = dataset.get_basket_undetected_videos()\n scores, labels, paths = scores + augment_scores, labels + augment_labels, paths + augment_paths\n\n # show ROC curve (save the figure before plt.show(), which can otherwise leave a blank image)\n auroc_score = roc_auc_score(labels, scores)\n print(\"AUROC score is\", auroc_score)\n RocCurveDisplay.from_predictions(labels, scores)\n\n if save_results_dir:\n plt.savefig(os.path.join(save_results_dir, 'roc_curve.jpg'))\n\n plt.show()\n\n if threshold is None: # when calling from src/train.py\n threshold = get_optimal_threshold(model, val_path, transform, max_video_frames, batch_size, device)\n\n # get classification report\n print(\"Threshold is\", threshold)\n y_pred_class = list(map(int, np.array(scores) > threshold))\n report = classification_report(labels, y_pred_class)\n print(report)\n\n # save classification report\n if save_results_dir:\n\n results_dict = {\n 'dataset': test_path,\n 'auroc_score': auroc_score,\n 'threshold': threshold,\n 'classification_report': report\n }\n with open(os.path.join(save_results_dir, 'results.json'), 'w') as f:\n json.dump(results_dict, f)\n\n # save prediction csv\n predictions_rows = list(zip(paths, scores, y_pred_class, labels))\n with open(os.path.join(save_results_dir, \"predictions.csv\"), 'w', newline = '') as f:\n writer = csv.writer(f)\n writer.writerow(['path', 'score', 'prediction', 'target'])\n writer.writerows(predictions_rows)\n\n print(f\"\\nResults saved in {save_results_dir}\")\n","repo_name":"jonathanvevance/basketball_scoring_detection","sub_path":"src/utils/eval_utils.py","file_name":"eval_utils.py","file_ext":"py","file_size_in_byte":5886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"74369210378","text":"from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport sys\n\napp = QApplication(sys.argv)\nwindow = QWidget()\nwindow.setWindowTitle('Hello PyQT')\nwindow.setWindowIcon(QIcon('pyqt.png'))\nwindow.setGeometry(50, 50, 600, 400)\n\npb = QProgressBar(window)\npb.move(200, 180)\npb.resize(300, 50)\npb.setValue(20)\npb.setAlignment(Qt.AlignCenter)\n\n\n\nwindow.show()\napp.exec_()","repo_name":"franksalas/pyqtDev","sub_path":"Files/PyQt Widgets/11_Progressbar.py","file_name":"11_Progressbar.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"39114658927","text":"from dialogflow import ContextsClient, SessionsClient, types\nfrom google.protobuf import struct_pb2\n\n\nclass DialogflowClient:\n \"\"\"\n A simplified client to talk to a dialogflow\n\n >>> bot = DialogflowClient('<project_id>')\n >>> response = bot.ask('1234', 'Hi!')\n >>> response.query_result.fulfillment_text\n 'Good day! What can I do for you today?'\n \"\"\"\n\n def __init__(self, project_id):\n \"\"\"\n Initialize a Dialogflow ContextsClient/SessionsClient. See\n https://dialogflow.com/docs/reference/v2-auth-setup to setup credentials.\n\n :param project_id: The project id\n \"\"\"\n\n self.context_client = ContextsClient()\n self.session_client = SessionsClient()\n self.project_id = project_id\n\n def create_context(self, conversation_id, context_name, attributes):\n \"\"\"\n Adds attributes to Dialogflow context. The attributes can be used in Dialogflow responses by referencing eg.\n #contact_profile.primaryIdentifier\n\n :param conversation_id: The conversation id is used as session id in Dialogflow to differentiate different\n conversations going on\n :param context_name: The name you want to use in Dialogflow to reference the attributes (eg contact_profile)\n :param attributes: A dictionary of key/values to be uploaded to Dialogflow\n \"\"\"
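\n # Example (hypothetical values): attach a profile so intents can reference it as\n # #contact_profile.primaryIdentifier in their responses:\n #   bot.create_context('1234', 'contact_profile', {'primaryIdentifier': 'C-42'})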
\n\n parent = self.context_client.session_path(self.project_id, conversation_id)\n parameters = struct_pb2.Struct()\n for key, value in attributes.items():\n parameters[key] = value\n\n self.context_client.create_context(\n parent,\n types.Context(\n name=f\"projects/{self.project_id}/agent/sessions/{conversation_id}/contexts/{context_name}\",\n lifespan_count=5,\n parameters=parameters,\n ),\n )\n\n def ask(self, conversation_id, question):\n \"\"\"\n Pass a text message to Dialogflow and get back a DetectIntentResponse\n (https://cloud.google.com/dialogflow-enterprise/docs/reference/rpc/google.cloud.dialogflow.v2#detectintentresponse)\n\n :param conversation_id: The conversation id is used as session id in Dialogflow to differentiate different\n conversations going on\n :param question: The text the contact sent\n :return: DetectIntentResponse containing the answer from the bot and other metadata.\n \"\"\"\n\n session = self.session_client.session_path(self.project_id, conversation_id)\n query_input = types.QueryInput(\n text=types.TextInput(text=question, language_code=\"en-US\")\n )\n return self.session_client.detect_intent(session, query_input)\n","repo_name":"sparkcentral/virtual-agent-api-example","sub_path":"app/dialogflow_integration.py","file_name":"dialogflow_integration.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"} +{"seq_id":"17940388488","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n# Django-registration\nfrom registration.views import RegistrationView as BaseRegistrationView\nfrom django.contrib.auth import authenticate, get_user_model, login\nfrom registration import signals\nfrom models import GreenEq, RedEq, YellowEq, BlueEq, Rattrapage, DysStandard, DysCritique, ColorChoiceForm\n\nUser = get_user_model()\n\n\nclass RegistrationView(BaseRegistrationView):\n\n def register(self, form):\n new_user = form.save()\n new_user = authenticate(\n username=getattr(new_user, User.USERNAME_FIELD),\n password=form.cleaned_data['password1']\n )\n login(self.request, new_user)\n signals.user_registered.send(sender=self.__class__,\n user=new_user,\n request=self.request)\n return new_user\n\n def get_success_url(self, user):\n return '/server'\n\n# =================================================================\n\n\ndef server(request):\n if request.method == \"POST\":\n form = ColorChoiceForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/user')\n else:\n form = ColorChoiceForm()\n return render(request, \"server.html\", {'form': form})\n\n\n@login_required(login_url='/accounts/login/')\ndef user(request):\n nombre = 4\n carte_standard = DysStandard.objects.order_by('?').first()\n carte_critique = DysCritique.objects.order_by('?').first()\n carte_rattrapage = Rattrapage.objects.order_by('?').first()\n return render(request, "user.html", {"carte_standard":carte_standard, "carte_critique":carte_critique, 
\"carte_rattrapage\":carte_rattrapage, \"nombre\":nombre} )\n\n@login_required(login_url='/accounts/login/')\n\ndef animateur(request):\n nombre = 1\n carte_standard = DysStandard.objects.order_by('?').first()\n carte_critique = DysCritique.objects.order_by('?').first()\n carte_rattrapage = Rattrapage.objects.order_by('?').first()\n\n return render(request, \"animateur.html\", {\"carte_standard\":carte_standard, \"carte_critique\":carte_critique, \"carte_rattrapage\":carte_rattrapage, \"nombre\":nombre} )\n","repo_name":"vniati/permis_de_produire","sub_path":"PdP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"27653045059","text":"# coding:utf-8\nimport tensorflow as tf\nfrom tqdm import tqdm\n\ntensor = tf.data.Dataset.from_tensor_slices(list(range(100)))\n\nfor t in tensor.repeat(100).shuffle(100).padded_batch(7):\n print(t.numpy())\n\nnew_tensor = tensor.map(lambda x: x**2).filter(lambda x: x%2==0)\nprint(new_tensor.prefetch())\n\n\nflowers = tf.keras.utils.get_file(\n 'flower_photos',\n 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',\n untar=True)\n\nprint(flowers)\n\nexample = tf.train.Example()\nprint(example.features.feature['test'])\n\n","repo_name":"tracholar/ml-homework-cz","sub_path":"advance-tensorflow/tracholar/tf_data_demo.py","file_name":"tf_data_demo.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"46"} +{"seq_id":"41408890051","text":"# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtCore import Qt, pyqtSignal, pyqtSlot\nfrom PyQt5.QtGui import QIcon, QCursor\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QMessageBox, QGraphicsBlurEffect, QSpacerItem, QLineEdit, QGroupBox, QRadioButton, QLabel, QComboBox\n\n\nfrom Core.Models.Worker import Usuarios\nfrom Core.Utils.utils import enabled_elements\nfrom Login_System.Verifications_and_Responses.Responses import Responses\nfrom Login_System.Verifications_and_Responses.Verifications import Verifications\nfrom ui.pages.windowAllArendators import AllArendatorsWindow\n\nfrom ui.pages.windowArendator import ArendatorWindow\nfrom ui.pages.windowHistoryPayments import HistoryPaymentsWindow\nfrom ui.widjets.baseItemArendator_widget import BaseItemArendatorWidget\nfrom ui.widjets.baseMenu_widget import ListBtnMenu\nfrom ui.widjets.formCharts_widget import BaseFormCharts\n\nfrom ui.windows.ui_MainWindow import Ui_MainWindow\n\n\n# for w in QtWidgets.QApplication.topLevelWidgets():\n# \tif isinstance(w, QtWidgets.QMessageBox) and w.parent() == self:\n# \t\tfor button in w.buttons():\n# \t\t\tbutton.setCursor(QCursor(Qt.PointingHandCursor))\n# \telif isinstance(w, QtWidgets.QFileDialog) and w.parent() == self:\n# \t\tfor button in w.findChildren(QtWidgets.QPushButton):\n# \t\t\tbutton.setCursor(QCursor(Qt.PointingHandCursor))\n\n\nclass MainBaseWindow(QMainWindow):\n\tresizeBase = pyqtSignal()\n\n\tdef __init__(self, user_fields=None, parent=None):\n\t\tsuper(MainBaseWindow, self).__init__(parent)\n\t\tself.ui = Ui_MainWindow()\n\t\tself.ui.setupUi(self)\n\t\tself.statusBar().setStyleSheet('color: #ffffff;')\n\t\tself.setWindowIcon(QIcon(\":/Images/logo-mini.png\"))\n\t\tself.setWindowTitle(f'Программа по учету ТС и заключенных договорах')\n\n\n\t\t# Выравнивание окна по центру монитора\n\t\tdesktop = QApplication.desktop()\n\t\tx = (desktop.width() 
- self.frameSize().width()) // 2\n\t\ty = (desktop.height() - self.frameSize().height()) // 2\n\t\tself.move(x, y)\n\n\t\tself.effect = QGraphicsBlurEffect()\n\t\tself.effect.setBlurRadius(4)\n\t\tself.setGraphicsEffect(self.effect)\n\t\tself.effect.setEnabled(False)\n\n\t\tself.parent = parent\n\n\t\tself.count_item_base_list_arendator: int = 0\n\n\t\tself.responses = Responses()\n\t\tself.verify = Verifications()\n\n\t\tself.users_db = Usuarios()\n\n\t\tself.user_fields = user_fields\n\t\tself.role = self.user_fields.role\n\t\tself.role_name = self.user_fields.role_name\n\t\tself.username = self.user_fields.username\n\t\tself.name = self.user_fields.name\n\n\t\tself.ui.lbl_name_user.setText(self.name)\n\t\tself.ui.lbl_role_name.setText(self.role_name)\n\t\tself.ui.role.setText(self.role)\n\n\t\tself.window_add_arendator = ArendatorWindow(parent=self)\n\n\t\t# enabled_elements(role = self.role, element = self.ui.btn_user, hideted = 'yes')\n\t\tenabled_elements(role = self.role, element = self.ui.groupBox, hideted = 'yes')\n\n\t\tself.ui.btn_hamburger_menu.clicked.connect(self.menu_humburger)\n\t\t# self.ui.btn_user.clicked.connect(self.user_managers)\n\n\t\tself.base_list_arendators = self.users_db.base_list_arendators()\n\t\tself.loadingBaseListArendators(self.base_list_arendators)\n\n\n\t\tself.ui.cb_arendators.addItem('')\n\t\tself.ui.cb_arendators.addItem(f'ФИО от А до Я', 'surname___asc')\n\t\tself.ui.cb_arendators.addItem(f'ФИО от Я до А', 'surname___desc')\n\t\tself.ui.cb_arendators.addItem(f'Дата регистрации (по убывающей)', '___max_date')\n\t\tself.ui.cb_arendators.addItem(f'Дата регистрации (по возрастающей)', '___min_date')\n\n\t\tself.ui.surname___text.setPlaceholderText(u'Поиск клиента...')\n\t\tself.ui.surname___text.textChanged.connect(self.filters_in_db_arendators)\n\n\t\tself.ui.hiden_arendator__all.toggled.connect(self.filters_in_db_arendators)\n\t\tself.ui.hiden_arendator__active.toggled.connect(self.filters_in_db_arendators)\n\t\tself.ui.hiden_arendator__hidened.toggled.connect(self.filters_in_db_arendators)\n\t\tself.ui.cb_arendators.textActivated.connect(self.filters_in_db_arendators)\n\n\n\t\tif self.role in ['root', 'admin']:\n\t\t\twidth_lbl = self.ui.lbl_indent_elements.width()\n\t\t\tself.ui.lbl_indent_elements.setMinimumWidth(int(width_lbl + 45))\n\n\t\tself.ui.tabWidget.setCurrentIndex(1)\n\n\t\tself.all_arendators = AllArendatorsWindow(parent = self)\n\t\tself.ui.layout_arendators_window.addWidget(self.all_arendators)\n\n\t\tself.chaarts = BaseFormCharts(parent=self)\n\t\tself.ui.charts_layout.addWidget(self.chaarts)\n\n\t\tself.ui.tabWidget.blockSignals(True)\n\t\tself.ui.tabWidget.currentChanged.connect(self.selectorTabBase)\n\t\tself.ui.tabWidget.blockSignals(False)\n\n\tdef create_data_charts(self):\n\t\t# print(f'Из create_data_charts: {self.users_db.charts_data()}')\n\t\tresult = self.users_db.charts_data()\n\t\treturn result\n\n\tdef selectorTabBase(self, selected_index):\n\t\tprint(f'Selected: {selected_index}')\n\t\tif selected_index == 0:\n\t\t\t# self.chaarts.dataChanged.connect(self.chaarts.fierst_chart)\n\t\t\tself.chaarts.updater()\n\t\telif selected_index == 1:\n\t\t\tpass\n\t\telif selected_index == 2:\n\t\t\tpass\n\n\n\tdef menu_humburger(self):\n\t\tif self.ui.btn_hamburger_menu.isChecked():\n\t\t\tself.hamburger = ListBtnMenu(self, self.ui.frm_top, role=self.role)\n\t\t\tself.hamburger.resize_menu()\n\t\t\tself.hamburger.show()\n\t\telse:\n\t\t\tself.hamburger.close()\n\n\tdef keyPressEvent(self, e):\n\t\tif 
self.ui.btn_hamburger_menu.isChecked():\n\t\t\tself.ui.btn_hamburger_menu.setChecked(False)\n\t\tif e.key() == Qt.Key_Escape:\n\t\t\tself.hamburger.close()\n\n\tdef resizeEvent(self, event):\n\t\tself.resizeBase.emit()\n\t\treturn super(MainBaseWindow, self).resizeEvent(event)\n\n\tdef closeEvent (self, event):\n\t\tfor window in QApplication.topLevelWidgets():\n\t\t\twindow.close()\n\n\n\tdef filters_in_db_arendators(self):\n\n\t\tfilter_layout = {}\n\t\tdef search_filter(layout):\n\t\t\tfor i in reversed(range(layout.count())):\n\t\t\t\tlayoutItem = layout.itemAt(i)\n\n\t\t\t\tif type(layoutItem) is QSpacerItem:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif layoutItem.widget() is not None:\n\t\t\t\t\twidgetToFilter = layoutItem.widget()\n\n\t\t\t\t\tif isinstance(widgetToFilter, QLabel):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tif isinstance(widgetToFilter, QLineEdit):\n\t\t\t\t\t\tlist_name_cut = widgetToFilter.objectName()\n\t\t\t\t\t\tdata_filter = widgetToFilter.text()\n\n\t\t\t\t\tif isinstance(widgetToFilter, QGroupBox):\n\t\t\t\t\t\tfor item in widgetToFilter.findChildren(QRadioButton):\n\t\t\t\t\t\t\tif item.isChecked():\n\t\t\t\t\t\t\t\tlist_name_cut = item.objectName()\n\t\t\t\t\t\t\t\tdata_filter = item.isChecked()\n\n\t\t\t\t\tif isinstance(widgetToFilter, QComboBox):\n\t\t\t\t\t\tlist_name_cut = widgetToFilter.objectName()\n\n\t\t\t\t\t\tif widgetToFilter.currentData():\n\t\t\t\t\t\t\tdata_filter = widgetToFilter.currentData()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdata_filter = widgetToFilter.currentText()\n\n\t\t\t\t\tfilter_layout.update({list_name_cut: data_filter})\n\n\t\t\t\telse:\n\t\t\t\t\tlayoutToFilter = layout.itemAt(i)\n\t\t\t\t\tsearch_filter(layoutToFilter)\n\n\t\tsearch_filter(self.ui.layoutSearchArendator)\n\n\t\t# self.list_new_transports = self.users_db.list_transport(filters=filter_cb)\n\t\t# print(f'filter_layout: {filter_layout}')\n\n\t\tself.base_list_arendators = self.users_db.base_list_arendators(filters_arendators=filter_layout)\n\t\tself.loadingBaseListArendators(self.base_list_arendators)\n\n\n\tdef loadingBaseListArendators(self, list_arendators):\n\t\tif self.role in ['root', 'admin']:\n\t\t\titem_arendators = list_arendators['all_arendators']\n\t\telse:\n\t\t\titem_arendators = list_arendators['for_manadgers']\n\n\t\twhile self.ui.arendators_base_layout.count() > 0:\n\t\t\titem = self.ui.arendators_base_layout.takeAt(0)\n\t\t\titem.widget().deleteLater()\n\n\t\tfor item in item_arendators:\n\t\t\tself.count_item_base_list_arendator += 1\n\t\t\titem_arendator = BaseItemArendatorWidget(\n\t\t\t\tid_widget_base_item_arendator = self.count_item_base_list_arendator,\n\t\t\t\tid_arendator = item[0].id,\n\t\t\t\thiden_arendator_db = item[0].hiden_arendator,\n\t\t\t\tcount_contracts = item.count_conract,\n\t\t\t\tsurname = item[0].surname,\n\t\t\t\tname = item[0].name,\n\t\t\t\tlast_name = item[0].last_name,\n\t\t\t\tdate_created = item[0].date_created,\n\t\t\t\trole = self.role,\n\t\t\t\tparent = self,\n\t\t\t)\n\t\t\tself.ui.arendators_base_layout.addWidget(item_arendator)\n\t\t\titem_arendator.clicked_arendator.connect(self.get_areandator_contracted)\n\t\t\titem_arendator.delete_arendator.connect(self.delete_arendatorwidget)\n\t\t\titem_arendator.hidened_arendator.connect(self.hidened_arendatorwidjet)\n\t\t\titem_arendator.histored_payments.connect(self.histored_paymentswidjet)\n\n\t\tself.statusBar().showMessage(f\"Выбрано клиентов: {item_arendators.count()}\")\n\n\n\t# def chartsBase(self):\n\t# \tresult = self.users_db.charts_data()\n\t# \tprint(result)\n\t# \treturn 
result\n\n\n\n\t@pyqtSlot()\n\tdef histored_paymentswidjet(self):\n\t\twidget = self.sender()\n\t\tself.effect.setEnabled(True)\n\t\tlist_histored_payment = HistoryPaymentsWindow(ids_arendator=int(widget.ui.lbl_id_arendator.text()), parent=self)\n\t\tlist_histored_payment.show()\n\n\t# @pyqtSlot()\n\tdef hidened_arendatorwidjet(self, status):\n\t\twidget = self.sender()\n\t\tself.effect.setEnabled(True)\n\n\t\tself.msg = QMessageBox(self)\n\t\tself.msg.setWindowIcon(QIcon(\":/Images/logo-mini.png\"))\n\t\tself.msg.setIcon(QMessageBox.Question)\n\t\tself.msg.setWindowTitle(\"Отобразить клиента\" if status else \"Скрыть отображение клиента\")\n\t\tself.msg.setText(\n\t\t\tf\"Вы действительно хотите отобразить клиента {widget.ui.lbl_fio_arendator.text()} ?\" if status else f\"Вы действительно хотите скрыть клиента {widget.ui.lbl_fio_arendator.text()} ?\")\n\t\tself.buttonAceptar = self.msg.addButton(\"Да, хочу\", QMessageBox.YesRole)\n\t\tself.buttonCancelar = self.msg.addButton(\"Отменить\", QMessageBox.RejectRole)\n\t\tself.msg.setDefaultButton(self.buttonAceptar)\n\n\t\tfor button in self.msg.buttons():\n\t\t\tbutton.setCursor(QCursor(Qt.PointingHandCursor))\n\t\tself.msg.exec_()\n\n\t\tif self.msg.clickedButton() == self.buttonAceptar:\n\t\t\tself.effect.setEnabled(False)\n\n\t\t\tif self.role not in ['root', 'admin']:\n\t\t\t\tself.ui.arendators_base_layout.removeWidget(widget)\n\t\t\t\twidget.deleteLater()\n\t\t\t\twidget.destroy()\n\n\t\t\tresponce = self.users_db.hidened_arendator(ids=widget.ui.lbl_id_arendator.text(), hidened = status)\n\t\t\tself.update()\n\t\t\tself.responses.message_from_db(responce, self.statusBar(), f'Клиент успешно отображен' if status else f'Клиент успешно скрыт')\n\n\t\telif self.msg.clickedButton() == self.buttonCancelar:\n\t\t\tself.effect.setEnabled(False)\n\t\t\tself.msg.close()\n\t\telse:\n\t\t\tself.effect.setEnabled(False)\n\t\t\tself.msg.close()\n\n\n\n\t@pyqtSlot()\n\tdef delete_arendatorwidget(self):\n\t\twidget = self.sender()\n\t\tself.effect.setEnabled(True)\n\n\t\tself.msg = QMessageBox(self)\n\t\tself.msg.setWindowIcon(QIcon(\":/Images/logo-mini.png\"))\n\t\tself.msg.setWindowTitle(\"Удаление учетной записи клиента\")\n\t\tself.msg.setIcon(QMessageBox.Question)\n\t\tself.msg.setText(\n\t\t\tf\"Вы действительно хотите удалить клиента {widget.ui.lbl_fio_arendator.text()} ?\")\n\n\t\tself.buttonAceptar = self.msg.addButton(\"Да, хочу\", QMessageBox.YesRole)\n\t\tself.buttonCancelar = self.msg.addButton(\"Отменить\", QMessageBox.RejectRole)\n\t\tself.msg.setDefaultButton(self.buttonAceptar)\n\n\t\tfor button in self.msg.buttons():\n\t\t\tbutton.setCursor(QCursor(Qt.PointingHandCursor))\n\n\t\tself.msg.exec_()\n\n\t\tif self.msg.clickedButton() == self.buttonAceptar:\n\t\t\tself.effect.setEnabled(False)\n\n\t\t\tself.ui.arendators_base_layout.removeWidget(widget)\n\n\t\t\tresponce = self.users_db.delete_arendator(ids=widget.ui.lbl_id_arendator.text())\n\t\t\twidget.deleteLater()\n\t\t\twidget.destroy()\n\n\t\t\tself.responses.message_from_db(responce, self.statusBar(), f'Клиент успешно удален')\n\n\t\telif self.msg.clickedButton() == self.buttonCancelar:\n\t\t\tself.effect.setEnabled(False)\n\t\t\tself.msg.close()\n\t\telse:\n\t\t\tself.effect.setEnabled(False)\n\t\t\tself.msg.close()\n\n\t@pyqtSlot()\n\tdef get_areandator_contracted(self):\n\t\twidget = self.sender()\n\t\tself.effect.setEnabled(True)\n\t\tarendator_card = ArendatorWindow(id_arendator=int(widget.ui.lbl_id_arendator.text()), 
parent=self)\n\t\tarendator_card.show()\n\n\t@pyqtSlot()\n\tdef update(self) -> None:\n\t\tprint('UPDATERRRRRRRRRRRRRRRRRRRR 2222222222222222')\n\t\t# self.loadingBaseListArendators(self.base_list_arendators)\n\t\tself.base_list_arendators = self.users_db.base_list_arendators()\n\t\tself.loadingBaseListArendators(self.base_list_arendators)\n\n\n\n\n\n\n\n\n\n\n\n\t# def eventFilter(self, obj, event):\n\t#\n\t# \tif obj == self.ui.frm_content and event.type() == event.Resize:\n\t# \t\tprint(\"dock\")\n\t# \t\tprint(obj.objectName())\n\t# \t\t# print(self.hamburger.isHidden())\n\t# \treturn super(MainBaseWindow, self).eventFilter(obj, event)\n\n\n\n\t# def eventFilter (self, obj, event):\n\t# \tif obj in {self.hamburger, self.centralWidget()} and event.type() == event.Resize:\n\t# \t\tprint(\"dock\")\n\t# \treturn super().eventFilter(obj, event)\n\n\t# def event (self, e):\n\t# \tif e.type() == QEvent.Resize and e.type() == QEvent.ShowToParent:\n\t# \t\t# print(e.type())\n\t# \t\tself.hamburger.resize_menu(self)\n\t# \treturn QMainWindow.event(self, e)\n\n\n\n","repo_name":"alexsul2008/Arenda","sub_path":"ui/pages/windowBaseMain____old.py","file_name":"windowBaseMain____old.py","file_ext":"py","file_size_in_byte":12923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"14869801970","text":"#from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.generic import FormView, ListView, TemplateView, DetailView\nfrom django.template import loader\nfrom django.shortcuts import get_object_or_404, render\n\nfrom .forms import EquipoForm\nfrom .models import Equipo\n\ndef detailView(request, equipo_id):\n equipo = get_object_or_404(Equipo, pk = equipo_id)\n context = { 'equipo': equipo }\n template = loader.get_template('equipo/detalle_equipo.html')\n return HttpResponse(template.render(context, request))\n\ndef opcionarEquipos(request):\n equipos_list = Equipo.objects.order_by('nombre')\n template = loader.get_template('equipo/seleccionar_equipo.html')\n context = {\n 'equipo_list': equipos_list,\n }\n return HttpResponse(template.render(context, request))\n\ndef equiposEnTexto(request):\n equipos_list = Equipo.objects.order_by('nombre')\n output = ', '.join([q.nombre for q in equipos_list])\n output = \"Estos son los equipos del torneo: \" + output\n return HttpResponse(output)\n\nclass EquipoView(FormView):\n template_name = 'equipo/equipo_form.html'\n form_class = EquipoForm\n success_url = '/'\n\n def form_valid(self, form):\n form.save()\n return super(EquipoView, self).form_valid(form)\n\n def form_invalid(self, form):\n return super(EquipoView, self).form_invalid(form)\n\nclass ListarView(ListView):\n variablePub = 'Hola Soy Juanjo'\n model = Equipo\n template_name = 'equipo/list_equipo.html'\n\t#contexto_object_name = 'equipos'\n","repo_name":"juanjosegdoj/AdminBasket","sub_path":"apps/equipo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"18829013853","text":"import pandas as pd\nimport numpy as np\nfrom enum import Enum, auto\n\nclass GannSwing():\n '''\n Class to perform Gann swing calculations\n '''\n def __init__(self, bars:pd.DataFrame):\n '''\n Parameters:\n - (required) a pandas dataframe containing OHLC data\n - (optional) the number of days required to trigger a swing\n - (optional) should an inside day in a down trend trigger a swing?\n - (optional) 
what is a small enough swing to ignore\n - (optional) use the close of an outside bar to decide the swing direction\n '''\n self.bars = bars\n self.__validate_bars(bars)\n #self.swing_days = swing_days\n #self.inside_down = inside_down\n #self.ignore_threshold = ignore_threshold\n #self.use_close_of_outside_bar = use_close_of_outside_bar\n #self.__parameter_validation()\n\n def __validate_bars(self, bars):\n if not isinstance(self.bars, pd.DataFrame):\n raise TypeError('bars should be a Pandas dataframe')\n mandatory_columns = ['Timestamp', 'Open', 'High', 'Low', 'Close']\n columns = list(bars.columns)\n for i in mandatory_columns:\n if i not in columns:\n raise IndexError('bars is missing a column named \"%s\"' % i)\n \n\n def __parameter_validation(self):\n '''\n Ensure that the values supplied to GannSwing() are valid\n '''\n if not isinstance(self.swing_days, int):\n raise TypeError('swing_days should be an integer')\n if not self.swing_days > 0:\n raise ValueError('swing_days should be a positive integer')\n if not isinstance(self.inside_down, bool):\n raise TypeError('inside_down should be a boolean')\n if not (isinstance(self.ignore_threshold, float) or isinstance(self.ignore_threshold, int)):\n raise TypeError('ignore_threshold should be a float or int')\n if isinstance(self.ignore_threshold, bool):\n raise TypeError('ignore_threshold should not be a boolean')\n if not self.ignore_threshold >= 0:\n raise ValueError('ignore_threshold should be a positive value')\n if not isinstance(self.use_close_of_outside_bar, bool):\n raise TypeError('use_close_of_outside_bar should be a boolean')\n\n \n\n class Trend(Enum):\n UNKNOWN = np.nan\n UP = 'Up'\n DOWN = 'Down'\n\n def calculate_swings(self, swing_days:int=1, inside_down:bool=False, ignore_threshold:int=0, use_close_of_outside_bar:bool=False) -> pd.DataFrame:\n self.swing_days = swing_days\n self.inside_down = inside_down\n self.ignore_threshold = ignore_threshold\n self.use_close_of_outside_bar = use_close_of_outside_bar\n self.__parameter_validation()\n results = pd.DataFrame(columns = ['Timestamp', 'SwingStartDate', 'SwingStartPrice', 'SwingStartBarID', 'SwingEndDate', 'SwingEndPrice', 'SwingEndBarID', 'ConfirmTimestamp', 'TradeableRange', 'Trend'])\n\n return results\n\n def _up_day(self, bar:int):\n '''\n Return True if bar is an up day, else False\n '''\n this_bar = self.bars.iloc[bar]\n try:\n previous_bar = self.bars.iloc[bar-1]\n if this_bar['Low'] >= previous_bar['Low'] and this_bar['High'] > previous_bar['High']:\n return True\n except IndexError:\n pass\n return False\n\n def _down_day(self, bar:int):\n '''\n Return True if bar is a down day, else False\n '''\n this_bar = self.bars.iloc[bar]\n try:\n previous_bar = self.bars.iloc[bar-1]\n if this_bar['Low'] < previous_bar['Low'] and this_bar['High'] <= previous_bar['High']:\n return True\n except IndexError:\n pass\n return False\n\n def _inside_day(self, bar:int):\n '''\n Return True if bar is an inside day, else False\n '''\n this_bar = self.bars.iloc[bar]\n try:\n previous_bar = self.bars.iloc[bar-1]\n if this_bar['Low'] >= previous_bar['Low'] and this_bar['High'] <= previous_bar['High']:\n return True\n except IndexError:\n pass\n return False\n\n def _outside_day(self, bar:int):\n '''\n Return True if bar is an outside day, else False\n '''\n this_bar = self.bars.iloc[bar]\n try:\n previous_bar = self.bars.iloc[bar-1]\n if this_bar['Low'] < previous_bar['Low'] and this_bar['High'] > previous_bar['High']:\n return True\n except IndexError:\n pass\n return False\n\n def 
_find_turns(self, swing_days):\n for i in range(swing_days+1, len(self.bars)):\n for j in range(1, swing_days):\n if self._down_day(i-j) and self._up_day(i+j):\n break\n row = pd.DataFrame({'Swing': self.bars.iloc[i]})\n pass\n\n def visualise(self):\n '''\n Draw an OHLC chart of the bars data. If swings have been calculated, overlay them\n on top of the OHLC chart\n '''\n import plotly.graph_objects as go\n\n # When you hover over a bar on the chart, you should see the OHLC values\n hovertext=[]\n for i in range(len(self.bars['Open'])):\n hovertext.append('Open: '+str(self.bars['Open'][i])+'<br>High: '+str(self.bars['High'][i])+'<br>Low: '+str(self.bars['Low'][i])+'<br>Close: '+str(self.bars['Close'][i]))\n\n fig = go.Figure(data=go.Ohlc(x=self.bars['Timestamp'],\n open=self.bars['Open'],\n high=self.bars['High'],\n low=self.bars['Low'],\n close=self.bars['Close']),\n #text=hovertext,\n #hoverinfo='text'\n )\n fig.update(layout_xaxis_rangeslider_visible=False)\n\n #if self.swing_days():\n # Overlay a swing chart on top of the bar chart\n # go.update_layout(...)\n #pass # Remove this line when the swing charts are working\n fig.show()\n
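 # Illustrative usage (a sketch; the CSV path and its contents are hypothetical):\n #   bars = pd.read_csv('ohlc.csv', parse_dates=['Timestamp'])\n #   gs = GannSwing(bars)\n #   swings = gs.calculate_swings(swing_days=2)\n #   gs.visualise()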
\n def ticksize(self):\n '''\n Calculate ticksize from the last BARS_TO_USE bars. It's not perfect, but close enough for government work...\n '''\n BARS_TO_USE = 20\n last_N_bars = self.bars.tail(BARS_TO_USE)\n last_N_bars = last_N_bars.drop(columns=['Timestamp'])\n prices = set()\n\n # Add all the OHLC values from the last BARS_TO_USE bars to a set & sort it\n for _, row in last_N_bars.iterrows():\n prices.add(row['Open'])\n prices.add(row['High'])\n prices.add(row['Low'])\n prices.add(row['Close'])\n p1 = sorted(prices)\n\n # Find the smallest gap between consecutive items in the set\n ticksize = 10000000\n for first, second in zip(p1, p1[1:]):\n ticksize = min(ticksize, round(second-first, 6))\n return(ticksize)\n\n\nif __name__ == '__main__':\n gs = GannSwing(bars=pd.DataFrame())","repo_name":"monch1962/gann-swing","sub_path":"gannswing.py","file_name":"gannswing.py","file_ext":"py","file_size_in_byte":6990,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"29230471879","text":"def preorder_traverse(T):\n if T > 13:\n pass\n else:\n if not visited[T]:\n print(tree_list[T])\n visited[T] = True\n preorder_traverse(T*2)\n preorder_traverse(T*2+1)\n\n\nn = 13\nvisited = [False]*15\n# 1 / -> 2 / -> 4 -> 8 -> 9 / -> 5\na = '1 2 1 3 2 4 3 5 3 6 4 7 5 8 5 9 6 10 6 11 7 12 11 13'\ntr = list(map(int, a.split()))\ntest = [[0, 0] for _ in range(32)]\nprint(test)\nfor i in range(0, len(tr), 2):\n if test[tr[i]*2][0] != 0:\n test[tr[i]*2+1][1] = tr[i+1]\n else:\n test[tr[i]*2][0] = tr[i+1]\n\nprint(test)\ntree_list = [0]*32\nprint(tree_list)\n\npreorder_traverse(1)\n\n\n#\n# tree = [1, 2, 1, 3, 2, 4, 3, 5, 3, 6, 4, 7, 5, 8, 5, 9, 6, 10, 6, 11, 7, 12, 11, 13]\n# visited = [0]*14\n#\n# relation = [[0]*2 for _ in range(14)]\n#\n# for i in range(0, len(tree), 2):\n#     if relation[tree[i]][0] == 0:\n#         relation[tree[i]][0] = tree[i+1]\n#     else:\n#         relation[tree[i]][1] = tree[i+1]\n#\n# def tree(relation, node):\n#     global visited\n#     if relation[node]:\n#         visited[node] = 1\n#         print(node)\n#         if relation[node][0]:\n#             tree(relation, relation[node][0])\n#         if relation[node][1]:\n#             tree(relation, relation[node][1])\n#\n# tree(relation, 1)","repo_name":"JJayeee/CodingPractice","sub_path":"SWExpertAcademy/LEARN/08. 
Tree/1_Traversal.py","file_name":"1_Traversal.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"32627818075","text":"from django.test import TestCase, Client\nfrom blog.views import about, PostListView, PostDetailView, PostCreateView\nfrom django.urls import reverse\nfrom blog.models import Post\nfrom pytest_django.asserts import assertTemplateUsed\nfrom django.contrib.auth.models import User\n\nclass Test_Views(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.client = Client()\n cls.home_url = reverse('blog-home')\n cls.about_url = reverse('blog-about')\n cls.login_url = reverse(\"login\") \n cls.create_url = reverse('post-create')\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def setUp(self):\n self.user = User.objects.create(username=\"Profile_user\", email='profileUser@company.com')\n self.user.set_password('testing321')\n self.user.save()\n\n self.post = self.user.post_set.create(title='Test post', content='test post content')\n self.post = self.user.post_set.create(title='Test post1', content='test post content')\n self.post = self.user.post_set.create(title='Test post2', content='test post content')\n self.post = self.user.post_set.create(title='Test post3', content='test post content')\n\n self.detail_url = reverse('post-detail', args=[self.post.id])\n self.post_count = Post.objects.count()\n self.posts_per_page = PostListView.paginate_by\n\n self.user_posts_url = reverse('user-posts',args=[self.user.username])\n\n def tearDown(self):\n self.user.delete()\n\n def test_home_GET(self):\n response = self.client.get(self.home_url)\n \n assert response.context['posts'].count() == min(self.post_count, self.posts_per_page)\n assert response.status_code == 200\n assertTemplateUsed(response, 'blog/home.html')\n\n def test_user_posts_GET(self):\n response = self.client.get(self.user_posts_url)\n \n assert response.status_code == 200\n assertTemplateUsed(response, 'blog/user_posts.html')\n \n def test_about_GET(self):\n response = self.client.get(self.about_url)\n \n assert response.status_code == 200\n assertTemplateUsed(response, 'blog/about.html')\n\n \n def test_post_detail_GET(self):\n response = self.client.get(self.detail_url)\n\n post = response.context['post']\n assert post.id == self.post.id\n assert post.title == self.post.title\n assert response.status_code == 200\n assertTemplateUsed(response, 'blog/post_detail.html')\n\n def test_post_create_GET_not_logged_in(self):\n response = self.client.get(self.create_url)\n\n self.assertRedirects(response, self.login_url+\"?next=\"+self.create_url, status_code=302, \n target_status_code=200, fetch_redirect_response=True)\n\n def test_post_create_GET_logged_in(self):\n logged_in = self.client.login(username = self.user.username, password='testing321')\n assert logged_in == True\n \n response = self.client.get(self.create_url) \n \n assert response.status_code == 200","repo_name":"Michael-Schembri/DjangoBasics","sub_path":"django_project/blog/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"10958324163","text":"import requests\n\nclass Airport():\n def __init__(self):\n self.path_file_employee = \"src/empleados.txt\"\n self.readFile()\n\n def readFile(self):\n try:\n file_employee = open(self.path_file_employee, \"r\", encoding=\"UTF-8\")\n self.data_file = 
file_employee.readlines()\n file_employee.close()\n except:\n print(\"ERROR: No se pudo acceder al archivo\")\n else:\n return self.fileToJson()\n\n def fileToJson(self):\n try:\n airport_id = self.getIdAirport()\n for airport in self.data_file:\n airport = airport.strip(\"\\n\").split(\", \")\n # Airport Data\n airport_name = airport[4]\n # Country Id\n country = airport[2][0:3:1].upper()\n # Json\n id_country = {\"id_country\": self.getIdCountry(country)}\n self.airport_json = {\"id_airport\": airport_id, \"name\": airport_name, \"country\": id_country}\n self.postAirport()\n airport_id += 1\n except:\n print(\"Error: No se puede llenar la tabla Airport, intente llenando la tabla Country primero\")\n \n def postAirport(self):\n api_url = \"http://localhost:8080/Airport/createAirport\"\n response = requests.post(api_url, json=self.airport_json)\n response.json()\n print(\"Airport-Insert: \", response.status_code)\n \n def getIdAirport(self):\n api_url = \"http://localhost:8080/Airport/getAirport\"\n response = requests.get(api_url)\n try: \n id = response.json()[0][\"id_airport\"]\n except:\n id = 1\n finally:\n return int(id)\n \n def getIdCountry(self, code):\n api_url = \"http://localhost:8080/Country/getCountry\"\n response = requests.get(api_url)\n for data in response.json():\n if data[\"code\"] == code:\n id = data[\"id_country\"]\n return id\n","repo_name":"EMcoding17/EvaluacionPython-Spring","sub_path":"controllers/airport.py","file_name":"airport.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"36967541430","text":"import logging\nimport pwnagotchi.plugins as plugins\nimport datetime\nimport pandas as pd\n\n\nclass Timer(plugins.Plugin):\n __author__ = 'idoloninmachina@gmail.com'\n __version__ = '0.1.0'\n __license__ = 'GPL3'\n __description__ = 'Measure the amount of time taken by the pwnagotchi to capture a handshake.'\n __name__ = 'Timer'\n __help__ = \"\"\"\n Measure the amount of time taken by the pwnagotchi to capture a handshake.\n \"\"\"\n __dependencies__ = {\n 'pip': ['scapy'],\n }\n __defaults__ = {\n 'enabled': False,\n }\n\n def __init__(self):\n self.running = False\n self.data = {\n 'Time to deauth': [],\n 'Time to handshake': [],\n 'Time between deauth and handshake': [],\n }\n self.reset_times()\n\n def on_epoch(self, agent, epoch, epoch_data):\n self.reset_times()\n\n def on_loaded(self):\n logging.info(\"[Timer] plugin loaded\")\n\n def on_unload(self, ui):\n logging.info(\"[Timer] Plugin unloaded\")\n\n def on_wifi_update(self, agent, access_points):\n time = datetime.datetime.now()\n self.wifi_update_time = time\n\n def on_deauthentication(self, agent, access_point, client_station):\n time = datetime.datetime.now()\n self.wifi_deauth_time = time\n\n def on_handshake(self, agent, filename, access_point, client_station):\n time = datetime.datetime.now()\n self.wifi_handshake_time = time\n\n self.process_data()\n self.reset_times()\n\n def process_data(self):\n # We have captured a handshake, so we need to check if it was a passive capture\n if self.wifi_deauth_time is None:\n # We haven't deauthed anyone, so this was just a passive capture\n # Not relevant to the data we want\n return\n\n self.data['Time to deauth'].append(\n self.calculate_difference_in_seconds(\n self.wifi_update_time, self.wifi_deauth_time))\n self.data['Time to handshake'].append(\n self.calculate_difference_in_seconds(\n self.wifi_update_time, self.wifi_handshake_time))\n 
self.data['Time between deauth and handshake'].append(\n self.calculate_difference_in_seconds(\n self.wifi_deauth_time, self.wifi_handshake_time))\n\n df = pd.DataFrame(self.data, columns=['Time to deauth',\n 'Time to handshake',\n 'Time between deauth and handshake', ])\n logging.info('[Timer] data saved')\n logging.info(df)\n df.to_csv('/home/pi/data/pwnagotchi_times.csv')\n\n def calculate_difference_in_seconds(self, past, future):\n difference = future - past\n return difference.total_seconds()\n\n def reset_times(self):\n self.wifi_update_time = None\n self.wifi_deauth_time = None\n self.wifi_handshake_time = None\n","repo_name":"itsdarklikehell/pwnagotchi-plugins","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"46"} +{"seq_id":"70113583499","text":"from __future__ import absolute_import, division, print_function\nfrom libtbx.utils import format_cpu_times\nimport sys, os, subprocess, tempfile, platform\nfrom iotbx.map_model_manager import map_model_manager\nfrom iotbx.data_manager import DataManager\nfrom cctbx.maptbx.box import shift_and_box_model\nimport mmtbx\nimport libtbx.load_env\n\nimport boost_adaptbx.boost.python as bp\nbp.import_ext(\"mmtbx_probe_ext\")\nimport mmtbx_probe_ext as probeext\n\nfrom mmtbx.probe import Helpers, AtomTypes\n\ndef RunProbeTests(inFileName):\n\n #========================================================================\n # Call the test functions for the libraries we test.\n\n ret = probeext.DotSpheres_test()\n assert len(ret) == 0, \"DotSpheres_test() failed: \" + ret\n\n ret = probeext.SpatialQuery_test()\n assert len(ret) == 0, \"SpatialQuery_test() failed: \" + ret\n\n ret = probeext.Scoring_test()\n assert len(ret) == 0, \"Scoring_test() failed: \" + ret\n\n AtomTypes.Test()\n Helpers.Test()\n\n #========================================================================\n # Now ensure that we can use the C++-wrapped classes as intended to make sure\n # that the wrapping code or parameters have not changed.\n\n #========================================================================\n # Make sure we can get at the DotSphere objects and their methods\n cache = probeext.DotSphereCache(10)\n sphere1 = cache.get_sphere(1)\n dots = sphere1.dots()\n\n #========================================================================\n # Make sure we can fill in an ExtraAtomInfoList and pass it to scoring\n # Generate an example data model with a small molecule in it unless we've\n # been given a file name to open.\n if inFileName is not None and len(inFileName) > 0:\n # Read a model from a file using the DataManager\n dm = DataManager()\n dm.process_model_file(inFileName)\n model = dm.get_model(inFileName)\n else:\n # Generate a small-molecule model using the map model manager\n mmm=map_model_manager() # get an initialized instance of the map_model_manager\n mmm.generate_map() # get a model from a generated small library model and calculate a map for it\n model = mmm.model() # get the model\n\n # Fix up bogus unit cell when it occurs by checking crystal symmetry.\n cs = model.crystal_symmetry()\n if (cs is None) or (cs.unit_cell() is None):\n model = shift_and_box_model(model = model)\n\n # Get the list of all atoms in the model\n atoms = model.get_atoms()\n\n # Get the bonding information we'll need to exclude our bonded neighbors.\n try:\n p = mmtbx.model.manager.get_default_pdb_interpretation_params()\n 
model.process(make_restraints=True, pdb_interpretation_params=p) # make restraints\n geometry = model.get_restraints_manager().geometry\n sites_cart = model.get_sites_cart() # cartesian coordinates\n bond_proxies_simple, asu = \\\n geometry.get_all_bond_proxies(sites_cart = sites_cart)\n except Exception as e:\n raise Exception(\"Could not get bonding information for input file: \" + str(e))\n bondedNeighbors = Helpers.getBondedNeighborLists(atoms, bond_proxies_simple)\n\n # Traverse the hierarchy and look up the extra data to be filled in.\n class philLike:\n def __init__(self, useImplicitHydrogenDistances = False):\n self.implicit_hydrogens = useImplicitHydrogenDistances\n self.set_polar_hydrogen_radius = True\n ret = Helpers.getExtraAtomInfo(model,bondedNeighbors,\n useNeutronDistances=False,probePhil=philLike(False))\n extra = ret.extraAtomInfo\n\n # Construct a SpatialQuery and fill in the atoms. Ensure that we can make a\n # query within 1000 Angstroms of the origin.\n sq = probeext.SpatialQuery(atoms)\n nb = sq.neighbors((0,0,0), 0, 1000)\n\n # Construct a DotScorer object.\n # Find the radius of each atom in the structure and construct dot spheres for\n # them. Find the atoms that are bonded to them and add them to an excluded list.\n # Then compute the score for each of them and report the summed score over the\n # whole molecule the way that Reduce will.\n ds = probeext.DotScorer(extra)\n total = 0\n badBumpTotal = 0\n for a in atoms:\n rad = extra.getMappingFor(a).vdwRadius\n assert rad > 0, \"Invalid radius for atom look-up: \"+a.name+\" rad = \"+str(rad)\n sphere = cache.get_sphere(rad)\n\n # Excluded atoms that are bonded to me or to one of my neightbors.\n # It has the side effect of excluding myself if I have any neighbors.\n # Construct as a set to avoid duplicates.\n exclude = set()\n for n in bondedNeighbors[a]:\n exclude.add(n)\n for n2 in bondedNeighbors[n]:\n exclude.add(n2)\n exclude = list(exclude)\n\n dots = sphere.dots()\n res = ds.score_dots(a, 1.0, sq, rad*3, 0.25, exclude, sphere.dots(), sphere.density(), False, False)\n total += res.totalScore()\n if res.hasBadBump:\n badBumpTotal += 1\n\n # Test calling the single-dot checking code as will be used by Probe to make sure\n # all of the Python linkage is working\n dotOffset = [1, 0, 0]\n check = ds.check_dot(atoms[0], dotOffset, 1, atoms, [atoms[0]])\n overlapType = check.overlapType\n\n # Test calling the interaction_type method to be sure Python linkage is working\n interactionType = ds.interaction_type(check.overlapType, check.gap)\n\n #========================================================================\n # Regression test a Probe2 run against a snippet of a file, comparing the output\n # to the output generated by a previous version of the program. 
If there are\n # differences, report that this is the case and recommend verifying that the\n # differences are intentional and replacing the stored output.\n data_dir = libtbx.env.under_dist(\n module_name = \"mmtbx\",\n path = os.path.join(\"regression\",\"pdbs\"),\n test = os.path.isdir)\n model_file = os.path.join(data_dir,'Fe_1brf_snip_reduced.pdb')\n kin_dir = libtbx.env.under_dist(\n module_name = \"mmtbx\",\n path = os.path.join(\"regression\",\"kins\"),\n test = os.path.isdir)\n kin_file = os.path.join(kin_dir,'Fe_1brf_snip_reduced.kin')\n temp_file = os.path.join(tempfile._get_default_tempdir(),\n next(tempfile._get_candidate_names())+\".kin\" )\n try:\n my_env = os.environ\n exe_name = 'mmtbx.probe2'\n if platform.system() == 'Windows':\n exe_name += '.bat'\n if subprocess.check_call([exe_name\n ,'source_selection=\"all\"'\n ,'approach=self'\n ,'output.separate_worse_clashes=True'\n ,'output.file_name='+temp_file\n ,'include_mainchain_mainchain=True'\n ,'output.add_kinemage_keyword=True'\n ,model_file\n ], env = my_env\n , stdout = subprocess.DEVNULL\n , stderr = subprocess.DEVNULL):\n raise Exception(\"Call to subprocess to regression test had nonzero return\")\n except Exception as e:\n raise Exception(\"Could not call subprocess to do regression test: \"+str(e))\n with open(temp_file) as ft:\n ft_text = ft.readlines()\n with open(kin_file) as fk:\n fk_text = fk.readlines()\n instructions = (\"Use KiNG or another program to see what changed and then determine if the \"+\n \"differences are expected. If so, replace \"+kin_file+\" with the new file.\")\n if len(ft_text) != len(fk_text):\n raise Exception(\"Different number of lines in \"+temp_file+\" and \"+kin_file+instructions)\n for i in range(3,len(ft_text)):\n if ft_text[i] != fk_text[i]:\n print('Line',i,'from each file:')\n print(ft_text[i])\n print(fk_text[i])\n raise Exception(\"Line \"+str(i)+\" in \"+temp_file+\" and \"+kin_file+\" differ. \"+instructions)\n\nif __name__ == '__main__':\n\n #==============================================================\n # Parse command-line arguments. The 0th argument is the name\n # of the script. There can be the name of a PDB file to read.\n realParams = 0\n fileName = \"\"\n for i in range(1,len(sys.argv)):\n fileName = sys.argv[i]\n\n RunProbeTests(fileName)\n print(format_cpu_times())\n print('OK')\n","repo_name":"cctbx/cctbx_project","sub_path":"mmtbx/regression/tst_probe.py","file_name":"tst_probe.py","file_ext":"py","file_size_in_byte":8155,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"46"} +{"seq_id":"20740476807","text":"import numpy as np\nimport tensorflow as tf\n\nfrom trainer import *\nfrom trainer256 import *\nfrom config import get_config\nfrom utils import prepare_dirs_and_logger, save_config\n\nimport pdb, os\n\ndef main(config):\n prepare_dirs_and_logger(config)\n\n if config.gpu>-1:\n os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"]=str(config.gpu)\n\n config.data_format = 'NHWC'\n\n if 1==config.model: \n trainer = PG2(config)\n trainer.init_net()\n elif 11==config.model:\n trainer = PG2_256(config)\n trainer.init_net()\n \n if config.is_train:\n save_config(config)\n trainer.train()\n else:\n # if not config.load_path:\n # raise Exception(\"[!] 
You should specify `load_path` to load a pretrained model\")\n trainer.test()\n\nif __name__ == \"__main__\":\n config, unparsed = get_config()\n main(config)\n","repo_name":"charliememory/Pose-Guided-Person-Image-Generation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":293,"dataset":"github-code","pt":"46"} +{"seq_id":"71084183820","text":"from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework import status\nfrom base.models import Project\nfrom django.contrib.auth.models import User\nfrom base.serializers import ProjectSerializer\n\n# Create your views here.\n\n@api_view(['GET'])\ndef getProjects(request):\n projects = Project.objects.all()\n serializer = ProjectSerializer(projects, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef getProject(request, pk):\n project = Project.objects.get(id=pk)\n serializer = ProjectSerializer(project, many=False)\n return Response(serializer.data)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef createProject(request):\n scrum = request.user\n data = request.data\n\n project = Project.objects.create(\n scrum=scrum,\n name='Sample Name',\n description='Sample Description'\n )\n serializer = ProjectSerializer(project, many=False)\n\n return Response(serializer.data)\n\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef addContributors(request, pk):\n data = request.data\n user_id = data['user']\n print('USER ID....', user_id)\n\n project = Project.objects.get(id=pk)\n user = User.objects.get(id=user_id)\n\n project.contributors.add(user)\n serializer = ProjectSerializer(project, many=False)\n return Response(serializer.data)\n\n@api_view(['PUT'])\n@permission_classes([IsAuthenticated])\ndef editProject(request, pk):\n data = request.data\n project = Project.objects.get(id=pk)\n\n project.name=data['name']\n project.description=data['description']\n\n project.save()\n serializer = ProjectSerializer(project, many=False)\n\n return Response(serializer.data)\n\n@api_view(['DELETE'])\n@permission_classes([IsAuthenticated])\ndef deleteProject(request, pk):\n project = Project.objects.get(id=pk)\n project.delete()\n return Response('Project was deleted successfully')\n","repo_name":"Manasseh-Kinyua/taskjar","sub_path":"base/views/project_views.py","file_name":"project_views.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"38260177806","text":"# http://news.naver.com\r\n# data/naver_news.html\r\n# 뉴스제목 추출\r\n\r\n# //ul.newsnow_txarea li div a strong\r\n# //ul[@class=\"mlist2 no_bg\"]/li/a/strong\r\n# ul > li:nth-child(1) > a > strong\r\n\r\nimport requests\r\nimport lxml.html\r\n\r\nurl = 'http://news.naver.com'\r\nres = requests.get(url)\r\nhtml = res.text\r\nroot = lxml.html.fromstring(html)\r\n\r\n# for part_html in root.cssselect('ul.newsnow_txarea li div a strong'):\r\n# print(part_html.text_content())\r\ncnt = 1 # 출력 횟수 지정\r\n\r\nfor part_html in root.xpath('//ul[@class=\"mlist2 no_bg\"]/li/a/strong'):\r\n print(part_html.text_content())\r\n\r\n\r\n if cnt % 5 == 0: print('\\r\\n')\r\n cnt = cnt + 1;\r\n\r\n\r\n\r\n# 탑 주요뉴스\r\n# # # 인절미 
//*[@id=\"pan_today_main_news\"]/div[1]/div/a/div[2]\r\n# for part_html in root.xpath('//p[@class =\"nowsnow_img_mask_p\"]'):\r\n# print(part_html.text_content())\r\n\r\nfor part_html in root.cssselect('p.newsnow_img_mask_p'):\r\n print(part_html.text_content().strip())\r\n# 분야별 탑 주요 뉴스\r\n\r\n# 최종 시간\r\nfor part_html in root.xpath('//span[@class=\"small\"]/em'):\r\n print('' +part_html.text_content() + '\\r\\n')\r\n#\r\n# for part_html in root.cssselect('span'):\r\n# print('' +part_html.text_content() + '\\r\\n')\r\n\r\n\r\n# //*[@id=\"text_today_main_news_801001\"]/li[1]/div/a/strong\r\n# # 인절미 //*[@id=\"pan_today_main_news\"]/div[1]/div/a/div[2]\r\n# # - > //div[@class =\"nowsnow_imgarea\"]/a/div/div\r\n# # 김정은 현장연결 //*[@id=\"section_politics\"]/div[2]/div/ul/li[1]/a/strong\r\n# # - > //ul[@class=\"mlist2 no_bg\"]/li/a/strong\r\n# //*[@id=\"section_politics\"]/div[2]/div/ul/li[1]/a/strong\r\n\r\n\r\n\r\n\r\n# 가장 많이 본 뉴스\r\n\r\nprint('가장 많이 본 뉴스 :')\r\n# for part_html in root.xpath('//ul[@class=\"section_list_ranking\"]/li/a'):\r\n# print('' +part_html.text_content() + '\\r\\n')\r\n\r\nfor part_html in root.cssselect('ul.section_list_ranking li a'):\r\n print(part_html.text_content())\r\n\r\n\r\n\r\n","repo_name":"SonDog0/bigdata","sub_path":"py1809/hello_request03.py","file_name":"hello_request03.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"33711855947","text":"\n\"\"\"\nCreated on Dec 8, 2011\n\n@author: thygrrr\n\"\"\"\nfrom PyQt5 import QtCore, QtWidgets\n\n\nclass ChatLineEdit(QtWidgets.QLineEdit):\n \"\"\"\n A special promoted QLineEdit that is used in channel.ui to provide a\n mirc-style editing experience with completion and history.\n LATER: History and tab completion support\n \"\"\"\n\n def __init__(self, parent):\n QtWidgets.QLineEdit.__init__(self, parent)\n self.returnPressed.connect(self.on_line_entered)\n self.history = []\n self.currentHistoryIndex = None\n self.historyShown = False\n self.completionStarted = False\n self.channel = None\n self.LocalChatterNameList = []\n self.currenLocalChatter = None\n\n def set_channel(self, channel):\n self.channel = channel\n\n def event(self, event):\n if event.type() == QtCore.QEvent.KeyPress:\n # Swallow a selection of keypresses that we want for our history\n # support.\n if event.key() == QtCore.Qt.Key_Tab:\n self.try_completion()\n return True\n elif event.key() == QtCore.Qt.Key_Space:\n self.accept_completion()\n return QtWidgets.QLineEdit.event(self, event)\n elif event.key() == QtCore.Qt.Key_Up:\n self.cancel_completion()\n self.prev_history()\n return True\n elif event.key() == QtCore.Qt.Key_Down:\n self.cancel_completion()\n self.next_history()\n return True\n else:\n self.cancel_completion()\n return QtWidgets.QLineEdit.event(self, event)\n\n # All other events (non-keypress)\n return QtWidgets.QLineEdit.event(self, event)\n\n @QtCore.pyqtSlot()\n def on_line_entered(self):\n self.history.append(self.text())\n self.currentHistoryIndex = len(self.history) - 1\n\n def showEvent(self, event):\n self.setFocus(True)\n return QtWidgets.QLineEdit.showEvent(self, event)\n\n def try_completion(self):\n if not self.completionStarted:\n # no completion on empty line\n if self.text() == \"\":\n return\n # no completion if last character is a space\n if self.text().rfind(\" \") == (len(self.text()) - 1):\n return\n\n self.completionStarted = True\n self.LocalChatterNameList = []\n # take last word from line\n 
self.completionText = self.text().split()[-1]\n # store line to be completed without the completion string\n self.completionLine = self.text().rstrip(self.completionText)\n\n # make a copy of users because the list might change frequently\n # giving all kind of problems\n if self.channel is not None:\n for cc in self.channel.chatters.values():\n name = cc.chatter.name\n if name.lower().startswith(self.completionText.lower()):\n self.LocalChatterNameList.append(name)\n\n if len(self.LocalChatterNameList) > 0:\n self.LocalChatterNameList.sort(\n key=lambda chatter: chatter.lower(),\n )\n self.currenLocalChatter = 0\n localName = self.LocalChatterNameList[self.currenLocalChatter]\n self.setText(self.completionLine + localName)\n else:\n self.currenLocalChatter = None\n else:\n if self.currenLocalChatter is not None:\n self.currenLocalChatter += 1\n self.currenLocalChatter %= len(self.LocalChatterNameList)\n localName = self.LocalChatterNameList[self.currenLocalChatter]\n self.setText(self.completionLine + localName)\n\n def accept_completion(self):\n self.completionStarted = False\n\n def cancel_completion(self):\n self.completionStarted = False\n\n def prev_history(self):\n if self.currentHistoryIndex is not None: # no history nothing to do\n # check for boundaries and only change index if history is already\n # shown\n if self.currentHistoryIndex > 0 and self.historyShown:\n self.currentHistoryIndex -= 1\n self.historyShown = True\n self.setText(self.history[self.currentHistoryIndex])\n\n def next_history(self):\n if self.currentHistoryIndex is not None:\n # check for boundaries and only change index if history is already\n # shown\n if (\n self.currentHistoryIndex < len(self.history) - 1\n and self.historyShown\n ):\n self.currentHistoryIndex += 1\n self.historyShown = True\n self.setText(self.history[self.currentHistoryIndex])\n","repo_name":"FAForever/client","sub_path":"src/chat/chatlineedit.py","file_name":"chatlineedit.py","file_ext":"py","file_size_in_byte":4854,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"46"} +{"seq_id":"72257851339","text":"
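# Find the greatest common divisor by trial division, then derive the LCM from it.\n# For reference, the standard library yields the same results (an equivalent sketch):\n#   import math; gcd = math.gcd(n, m); lcm = n * m // gcd\n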
","repo_name":"Conut-1/Python_Learning","sub_path":"백준/단계별로 풀기/NumberAndCombinatorics/3. Greatest Common Factor And Least Common Multiple_2609.py","file_name":"3. Greatest Common Factor And Least Common Multiple_2609.py","file_ext":"py","file_size_in_byte":196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"6899966814","text":"#!/usr/bin/env python3\n\n\"\"\"\n \\ /\n | _ X __ _ _ _|_\n | (_)/ \\ | (_|(_ |_\n\nUsage:\n    loXract.py\n    loXract.py ...\n    loXract.py -h | -H | --help\n    loXract.py -V | --version\n\nOptions:\n    -h -H --help    Displays this help.\n    -V --version    Outputs the version of the script.\n     Filename(s) of the .pcap file to extract data from.\n\nExamples:\n    loXract.py\n    loXract.py sniffs.pcap\n    loXract.py sniffs0.pcap sniffs2.pcap sniffs4.pcap sniffsN.pcap\n    loXract.py -h\n    loXract.py -V\n\"\"\"\n\n__author__ = 'naryal2580'\n__version__ = 'v0.7'\n\nfrom stoyled import good, info, coolInput, coolExit, \\\n    fetchFormatedTime, bad\nfrom scapy.all import rdpcap, Raw\nfrom docopt import docopt\n\n\ndef banner():\n    logo = '''\n \\ /\n | _ X __ _ _ _|_\n | (_)/ \\ | (_|(_ |_ {{{}}}\n\n    with {}<3{} {}--naryal2580{}\n    '''.format(__version__,\n               '\\033[31m',\n               '\\033[0m',\n               '\\033[1m',\n               '\\033[0m')\n    logo = logo.replace('X', '\\033[1m\\033[32mX\\033[0m')\n    logo = logo.replace('|', '\\033[34m|\\033[0m')\n    logo = logo.replace('_', '\\033[34m_\\033[0m')\n    logo = logo.replace('(', '\\033[35m(\\033[0m')\n    logo = logo.replace(')', '\\033[33m)\\033[0m')\n    logo = logo.replace('/', '\\033[32m/\\033[0m')\n    logo = logo.replace('\\\\', '\\033[32m\\\\\\033[0m')\n    print(logo)\n\n\ndef main(pcapFileNames):\n    print(info('Started [at] {}\\n'.format(fetchFormatedTime())))\n    try:\n        for pcapFileName in pcapFileNames:\n            print(info('Opening File -> {}'.format(pcapFileName)))\n            packets = rdpcap(pcapFileName)\n            print(good('File read sequence completed.'))\n            data = b''\n            print(\n                info(\n                    'Reading and merging data from provided sniffed packets.'\n                )\n            )\n            for packet in packets[Raw]:\n                data += packet.load\n            print(\n                good(\n                    'Data Successfully Extracted -> {} bytes'.format(len(data))\n                )\n            )\n            print(\n                info(\n                    'Now, writing the Data to -> {}'.format(\n                        pcapFileName+'.out'\n                    )\n                )\n            )\n            with open(pcapFileName+'.out', 'wb') as outFile:\n                outFile.write(data)\n                outFile.close()\n            print(good('Done!\\n'))\n    except KeyboardInterrupt:\n        print(bad('SIGINT received, terminating.'))\n        coolExit(0)\n    except Exception as exception:\n        print(bad('Ugh! 
Error -> {}'.format(exception)))\n coolExit(1)\n coolExit(0)\n\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='loXract_{} by {}'.format(__version__,\n __author__))\n if not arguments['--help']:\n banner()\n if arguments['']:\n pcapFileNames = arguments['']\n else:\n pcapFileNames = coolInput('Filename(s) Separated by double ').\\\n split(' ')\n main(pcapFileNames)\n","repo_name":"naryal2580/pythonScripts","sub_path":"loXract.py","file_name":"loXract.py","file_ext":"py","file_size_in_byte":3266,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"} +{"seq_id":"481879676","text":"\"\"\" Flask-Script commands for starting/managing Gunicorn \"\"\"\nimport subprocess\nimport time\n\nfrom sys import stderr\n\nfrom flask_migrate import upgrade as db_upgrade\nfrom gunicorn.app.base import BaseApplication\nfrom pika.exceptions import AMQPError\nfrom py.path import local # pylint:disable=import-error\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.exc import OperationalError\n\nfrom dockci.server import APP, app_init, get_db_uri, get_pika_conn, MANAGER\nfrom dockci.util import project_root\n\n\nclass GunicornWrapper(BaseApplication): # pylint:disable=abstract-method\n \"\"\" Gunicorn application for DockCI Flask app \"\"\"\n def __init__(self, options=None):\n self.options = options or {}\n super(GunicornWrapper, self).__init__()\n\n def load_config(self):\n \"\"\" Setup Gunicorn config \"\"\"\n config = dict([(key, value) for key, value in self.options.items()\n if key in self.cfg.settings and value is not None])\n for key, value in config.items():\n self.cfg.set(key.lower(), value)\n\n # TODO required for streaming logs, but a bad idea in other cases\n self.cfg.set('timeout', 0)\n\n def load(self):\n \"\"\" Get the Flask app \"\"\"\n return APP\n\n\n@MANAGER.option(\"-w\", \"--workers\",\n help=\"Number of gunicorn workers to start\",\n default=10)\n@MANAGER.option(\"--bind\",\n help=\"Interface, and port to listen on\",\n default=\"127.0.0.1:5000\")\n@MANAGER.option(\"--debug\",\n help=\"Turn debug mode on for Flask, and stops app preload for \"\n \"auto reloading\",\n default=False, action='store_true')\n@MANAGER.option(\"--db-migrate\",\n default=False, action='store_true',\n help=\"Migrate the DB on load\")\n@MANAGER.option(\"--timeout\",\n default=0, type=int,\n help=\"Time to wait for the resources to be available\")\n@MANAGER.option(\"--collect-static\",\n default=False, action='store_true',\n help=\"Collect static dependencies before start\")\ndef run(**kwargs):\n \"\"\" Run the Gunicorn worker \"\"\"\n kwargs['reload'] = kwargs['debug']\n kwargs['preload'] = not kwargs['debug']\n APP.debug = kwargs['debug']\n\n if kwargs['collect_static']:\n subprocess.check_call('./_deps_collectstatic.sh',\n cwd=project_root().strpath)\n\n if kwargs['timeout'] != 0:\n start_time = time.time()\n db_engine = create_engine(\n get_db_uri(),\n connect_args=dict(connect_timeout=2),\n )\n db_conn = None\n mq_conn = None\n while time.time() - start_time < kwargs['timeout']:\n try:\n if db_conn is None or db_conn.closed:\n db_conn = db_engine.connect()\n except OperationalError:\n time.sleep(2)\n continue\n\n try:\n if mq_conn is None:\n mq_conn = get_pika_conn()\n except AMQPError:\n time.sleep(2)\n continue\n\n break\n\n if db_conn is None or db_conn.closed:\n stderr.write(\"Timed out waiting for the database to be ready\\n\")\n return 1\n\n if mq_conn is None:\n stderr.write(\"Timed out waiting for RabbitMQ to be ready\\n\")\n return 1\n\n 
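# (editor's note, illustrative) e.g. with --timeout 60 the readiness loop\n    # above retries each backend every 2 seconds, i.e. up to ~30 attempts each\n    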
# Setup the exchange\n channel = mq_conn.channel()\n channel.exchange_declare(exchange='dockci.job', type='topic')\n channel.exchange_declare(exchange='dockci.queue', type='topic')\n channel.queue_declare(queue='dockci.agent')\n channel.queue_bind(exchange='dockci.queue',\n queue='dockci.agent',\n routing_key='*')\n mq_conn.close()\n\n if kwargs['db_migrate']:\n db_upgrade( # doesn't return anything\n local(__file__).dirpath().join('../../alembic').strpath\n )\n\n else:\n # Migrate will init the app for us\n app_init()\n\n GunicornWrapper(kwargs).run()\n","repo_name":"sprucedev/DockCI","sub_path":"dockci/commands/gunicorn.py","file_name":"gunicorn.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"46"} +{"seq_id":"21164020079","text":"\"\"\"\nSimple implementation that can be used for a 16 bit anonymous DC-protocol.\n\"\"\"\n\nimport numpy as np\nimport scipy as sp\nimport time\n\n\n\ndef xor(x, y):\n\tresult = \"\";\n\tfor i in range (0, len(x)):\n\t\tif (x[i] == y[i]):\n\t\t\tresult += \"0\";\n\t\telse:\n\t\t\tresult += \"1\";\n\treturn result;\ndef invXor(x, y):\n\tresult = \"\";\n\tfor i in range (0, len(x)):\n\t\tif (x[i] == y[i]):\n\t\t\tresult += \"1\";\n\t\telse:\n\t\t\tresult += \"0\";\n\treturn result;\n\ndef toHex(x):\n\treturn \"%X\" % int(x, 2);\n\t#return x.encode('hex_codec')\n\n#My own conversion to binary. \ndef toBinary(x):\n\tresult = \"\"\n\tfor i in range (0, len(x)):\n\t\tif (x[i].lower() == \"f\"):\n\t\t\tresult += \"1111\";\n\t\telif (x[i].lower() == \"e\"):\n\t\t\tresult += \"1110\";\n\t\telif (x[i].lower() == \"d\"):\n\t\t\tresult += \"1101\";\n\t\telif (x[i].lower() == \"c\"):\n\t\t\tresult += \"1100\";\n\t\telif (x[i].lower() == \"b\"):\n\t\t\tresult += \"1011\";\n\t\telif (x[i].lower() == \"a\"):\n\t\t\tresult += \"1010\";\n\t\telif (x[i].lower() == \"9\"):\n\t\t\tresult += \"1001\";\n\t\telif (x[i].lower() == \"8\"):\n\t\t\tresult += \"1000\";\n\t\telif (x[i].lower() == \"7\"):\n\t\t\tresult += \"0111\";\n\t\telif (x[i].lower() == \"6\"):\n\t\t\tresult += \"0110\";\n\t\telif (x[i].lower() == \"5\"):\n\t\t\tresult += \"0101\";\n\t\telif (x[i].lower() == \"4\"):\n\t\t\tresult += \"0100\";\n\t\telif (x[i].lower() == \"3\"):\n\t\t\tresult += \"0011\";\n\t\telif (x[i].lower() == \"2\"):\n\t\t\tresult += \"0010\";\n\t\telif (x[i].lower() == \"1\"):\n\t\t\tresult += \"0001\";\n\t\telif (x[i].lower() == \"0\"):\n\t\t\tresult += \"0000\";\n\treturn result;\t\n\n\"\"\"\n\nSA = Your shared 16-bit secret with alice\nSB = Your shared secret with bob. \nDA = The broadcasted data sent by Alice.\nDB = The broadcasted data sent by bob.\nM = 16 bit message that you wish to send anonymously. \nb = 1 means you wish to send the message. 
0 = you do not want to send the message.\n\nOutput = the broadcasted data if b = 0 and the broadcasted data + anonymous message (0000 if no anonymous message was sent)\n\n\"\"\"\n\ndef DC(SA, SB, DA, DB, M, b):\n\tif (b == 1):\n\t\tresult = toHex(xor(xor(toBinary(SA), toBinary(SB)), toBinary(M)))\n\t\treturn result.zfill(4);\n\t\t\t\n\telse:\n\t\tresult1 = xor(toBinary(SA), toBinary(SB));\n\t\tresult2 = xor(toBinary(DA), toBinary(DB))\n\t\treturn toHex(result1).zfill(4) + (toHex(xor(result1, result2)).zfill(4));\n\"\"\"\ntest vectors \nSA = \"27C2\"\nSB = \"0879\"\nDA = \"35F6\"\nDB = \"1A4D\"\nM = \"27BC\"\nb = 1\n\n\"\"\"\n\nSA = \"4303\"\nSB = \"1119\"\nDA = \"5137\"\nDB = \"032D\"\nM = \"B571\"\nb = 1\n\n\n\nprint (DC(SA, SB, DA, DB, M, b));\n\n","repo_name":"Zerox-1337/dc-net","sub_path":"dc.py","file_name":"dc.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"6960610109","text":"import boto3\n\nregion = 'eu-west-1'\n\n\ndef lambda_handler(event, context):\n instance_list = []\n client = boto3.client('autoscaling')\n response = client.describe_auto_scaling_groups(AutoScalingGroupNames=['terraform-20230608075321715400000005'])\n respo = response['AutoScalingGroups']\n print(respo)\n for i in respo:\n b = i['Instances']\n for c in b:\n f = c['InstanceId']\n instance_list.append(f)\n \n the_response = client.suspend_processes(\n AutoScalingGroupName='terraform-20230608075321715400000005',\n ScalingProcesses=['AlarmNotification','Launch', 'Terminate', 'AddToLoadBalancer', 'AZRebalance', 'HealthCheck', 'ScheduledActions', 'InstanceRefresh', 'ReplaceUnhealthy'])\n \n ec2 = boto3.client('ec2', region_name = 'eu-west-1')\n for id in instance_list:\n ec2.stop_instances(InstanceIds=[id])\n \n","repo_name":"nikolovatanass/small-infrastructure","sub_path":"lambda.py","file_name":"lambda.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"29030096411","text":"import os\nimport re\nfrom pathlib import Path\n\nimport fire\n\nimport gkit as gk\nfrom gkit.math import *\n\n\ndef loader(*args):\n for fn in args:\n if \"re://\" in fn:\n fn = re.compile(fn[5:])\n files = filter(\n lambda f: re.search(fn, str(f)) is not None,\n Path(\".\").rglob(\"*\")\n )\n for fn in list(files):\n yield gk.read(str(fn))\n else:\n yield gk.read(fn)\n\n\nclass CLI(object):\n @staticmethod\n def clip_by_shp(shp_path, out=\"./\", *args, **kwargs):\n if not os.path.exists(out):\n os.makedirs(out)\n\n for r in loader(*args):\n if kwargs.get('print'):\n print(r.filepath)\n res = r.clip_by_shp(shp_path)\n res.save(os.path.join(out, os.path.basename(r.filepath)))\n\n @staticmethod\n def map(formula, out=\"./\", *args, **kwargs):\n if not os.path.exists(out):\n os.makedirs(out)\n\n for r in loader(*args):\n if kwargs.get('print'):\n print(r.filepath)\n res = eval(formula)\n res.save(os.path.join(out, os.path.basename(r.filepath)))\n\n @staticmethod\n def calc(formula, out='out', *args, **kwargs):\n if not args:\n return\n\n out = Path(out).absolute()\n if not out.parent.exists():\n os.makedirs(str(out.parent))\n\n r = list(loader(*args))\n\n if kwargs.get('print'):\n for i in r:\n print(i.filepath)\n\n if len(r) == 1:\n r = r[0]\n\n res = eval(formula)\n res.save(str(out))\n\n @staticmethod\n def show(raster):\n \"\"\"Display a raster file.\"\"\"\n gk.read(raster).show()\n\n\ndef main():\n fire.Fire(CLI)\n\n\nif __name__ == 
'__main__':\n    main()\n","repo_name":"TitorX/gkit","sub_path":"gkit/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"} +{"seq_id":"34527135622","text":"# -*- coding: utf-8 -*-\n# @Author: drbdrb\n# @Date: 2019-12-26 09:35:35\n# @LastEditTime: 2020-03-08 16:36:42\n# @Description: Chat bot; it only needs to implement the Text message handler.\n\nimport json\nimport logging\nimport re\nimport string\nimport urllib.request\nfrom urllib.parse import urlencode\nimport jwt\nfrom MsgProcess import MsgProcess, MsgType\n\n\nclass Chat(MsgProcess):\n    def __init__(self, msgQueue):\n        super().__init__(msgQueue=msgQueue)\n        self.gameTalk = False\n\n    # handle text messages\n    def Text(self, message):\n        text = message['Data']\n        # self.qingyunke(text)\n        self.WXSpeech(text)\n\n    def qingyunke(self, text):\n        \" Qingyunke chat bot \"\n        if text is None:\n            return\n        url = r'http://api.qingyunke.com/api.php?key=free&appid=0&msg='\n        sendurl = url + text\n        url = urllib.parse.quote(sendurl, safe=string.printable)\n        page = urllib.request.urlopen(url)\n        if page.getcode() == 200:\n            html = page.read().decode(\"utf-8\")\n            # check whether the returned data is a dict; it may be a number, hence str()\n            if re.sub(r'{.*}', \"\", str(html)) == \"\":\n                res = json.loads(html)['content']\n                self.say(res)\n\n    def WXSpeech(self, text):\n        ''' Tencent chat bot '''\n        if text is None:\n            return\n        payload = {\n            'APPID': 'kWf7I8VjBre1BXP',\n            'TOKEN': 'o2GQF8Ri73Crcn9WiGH8X3K4e6huGb',\n            'EncodingAESKey': '2uetECGe8oob6bbSvLj4DBNjPh2epX1y8mws0pENF5g'\n        }\n        apiUrl = r'https://openai.weixin.qq.com/openapi/message/'\n        header = {\"username\": self.CUID, \"msg\": text}\n        headers = {'alg': \"HS256\"}\n        query = jwt.encode(header, payload['EncodingAESKey'],\n                           algorithm=headers['alg'], headers=headers).decode('ascii')\n        url = apiUrl + payload['TOKEN']\n        post_json = {\"query\": query}\n        post_json = urlencode(post_json).encode('utf-8')\n        try:\n            page = urllib.request.urlopen(url, data=post_json, timeout=5)  # response timeout set to 5 seconds\n        except Exception as e:\n            msg = \"和腾讯连接失败\"\n            logging.warning(\"{}:{}\".format(msg, e))\n            self.send(MsgType=MsgType.Text, Receiver='Screen', Data=msg)\n            return\n        # check that the HTTP request succeeded\n        if page.getcode() == 200:\n            html = page.read().decode(\"utf-8\")\n            if re.search(r'{.*}', str(html)):\n                res = json.loads(html)\n                if 'answer_type' not in res.keys():\n                    logging.warning(res)\n                    return\n\n                if res['answer_type'] == 'text':\n                    answer = res['answer']\n                    logging.info(answer)\n                    self.say(answer)\n\n                    if self.gameTalk: \n                        userWords = self.listen(15)\n                        if userWords:\n                            Triggers = ['退出', '停止', '关闭', '不玩了']\n                            if not any(map(lambda trigger: trigger in userWords, Triggers)):\n                                return self.WXSpeech(userWords)\n                        logging.info('关闭连续对话')\n                        self.gameTalk = False\n                        return self.WXSpeech('退出游戏') \n\n                    if '欢迎来到成语接龙' in answer:\n                        userWords = self.listen()\n                        Triggers = ['准备好了', '开始游戏']\n                        if any(map(lambda trigger: trigger in userWords, Triggers)):\n                            self.gameTalk = True\n                            logging.info('开启连续对话')\n                            return self.WXSpeech(userWords)\n                    \n                    if (answer[-1] == '?' or answer[-1] == '?') 
and self.gameTalk is False:  # one follow-up exchange\n                        return self.WXSpeech(self.listen())\n                    return \n\n                elif res['answer_type'] == 'music':\n                    if res['ans_node_name'] == '音乐':\n                        music_ans_detail = list(res['more_info'].values())[0]\n                        music_dict = json.loads(music_ans_detail)\n                        music_list = music_dict['play_command']['play_list']\n                        songList = list()\n                        for k in music_list:\n                            songname = '{}--{}'.format(k['name'], k['author']).strip()\n                            songList.append(\n                                dict({'songname': songname, 'songurl': k['url']}))\n                        \n                        self.send(MsgType=MsgType.QuitGeekTalk, Receiver='ControlCenter')\n                        answer = res['answer']\n                        self.say(answer)\n\n                        # playlist format: songList =[{'songname':name,'songurl':url},{...}...]\n                        self.send(MsgType=MsgType.LoadPlugin, Receiver='ControlCenter', Data='Music')\n                        self.send(MsgType=MsgType.Text, Receiver='Music', Data=songList)\n                        return\n\n                    elif res['ans_node_name'] == 'FM-笑话':\n                        screen = res['ans_node_name'] + '功能尚未启用'\n                        logging.debug(screen) \n                        return\n                    \n                elif res['answer_type'] == 'news':\n                    try: \n                        news_ans_detail = res['more_info']['news_ans_detail'] \n                        news_ans_detail = json.loads(news_ans_detail)\n                        docs = news_ans_detail['data']['docs']\n                    except Exception as e:\n                        logging.debug(\"%s %s\" % (news_ans_detail, e))\n                        self.say('新闻获取失败')\n                        return \n                    titles = ''\n                    text = '' \n                    for news in docs:\n                        titles += (news['title'] + '\\n')\n                        text += (news['title'] + ':' + news['abs_s'] + '\\n')\n                    self.send(MsgType=MsgType.QuitGeekTalk, Receiver='ControlCenter')\n                    self.send(MsgType=MsgType.Text, Receiver='Screen', Data=titles)\n                    self.send(MsgType=MsgType.Text, Receiver='SpeechSynthesis', Data=text)\n                    # logging.debug(text)\n\n                else: \n                    screen = res['ans_node_name'] + '功能尚未启用'\n                    logging.debug(screen)\n        else:\n            self.gameTalk = False\n            logging.warning('网络可能有点问题,请检查一下网络')\n","repo_name":"drbdrb/zimei","sub_path":"python/plugin/Chat/Chat.py","file_name":"Chat.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"zh","doc_type":"code","stars":137,"dataset":"github-code","pt":"46"} +{"seq_id":"35688053690","text":"\"\"\"Mutation data parser\n\nThis module contains functions that can take files from the Uniprot\ndatabase and parse the information into JSON format.\"\"\"\n\nimport base64\nimport json\nimport copy\n\nimport jsonschema\nimport numpy as np\n\nfrom .uniprot_database_tools import pfam_domain_parser\n\nEMPTY_MUT_DATA = dict(\n    x=[],\n    y=[],\n    mutationGroups=[],\n    domains=[],\n)\n\nPFAM_DOM_SCHEMA = {\n    \"type\": \"array\",\n    \"items\": {\n        \"type\": \"object\",\n        \"properties\": {\n            \"regions\": {\n                \"type\": \"array\",\n                \"items\": {\n                    \"type\": \"object\",\n                    \"properties\": {\n                        \"text\": {\"type\": \"string\"},\n                        \"start\": {\"type\": \"string\"},\n                        \"end\": {\"type\": \"string\"},\n                    },\n                    \"required\": [\"text\", \"start\", \"end\"]\n                }\n            },\n        },\n        \"required\": [\"regions\"]\n    }\n}\n\nPROT_DOM_SCHEMA = {\n    \"type\": \"array\",\n    \"items\": {\n        \"type\": \"object\",\n        \"properties\": {\n            \"name\": {\"type\": \"string\"},\n            \"coord\": {\"type\": \"string\"},\n        },\n        \"required\": [\"name\", \"coord\"]\n    }\n}\n\nARR_SCHEMA = {\n    \"type\": \"array\",\n    \"items\": {\"type\": [\"string\", \"number\"]}\n}\n\nMUT_DATA_SCHEMA = {\n    \"type\": \"object\",\n    \"properties\": {\n        \"x\": ARR_SCHEMA,\n        \"y\": ARR_SCHEMA,\n        \"mutationGroups\": ARR_SCHEMA,\n        \"domains\": PROT_DOM_SCHEMA,\n    },\n    \"required\": [\"x\"],\n}\n\n\n
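# (illustrative, added by the editor; not part of the original module)\n# a domains list that satisfies PROT_DOM_SCHEMA looks like:\n#     [{'name': 'PKinase', 'coord': '10-120'}]   # 'PKinase' is a made-up name\n\n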
def parse_mutations_uniprot_data(gff_data, start='start', stop='end', mut_types_to_skip=None):\n    \"\"\"parse a gff file downloaded into a pandas DataFrame from Uniprot\n    :param gff_data: pandas DataFrame\n    :param start: the name of the column containing the start coordinate of the mutation\n    :param stop: the name of the column containing the end coordinate of the mutation\n    :param mut_types_to_skip: a list of mutation types to skip\n    :return: formatted mutation data for the dash_bio.NeedlePlot component\n    \"\"\"\n    if mut_types_to_skip is None:\n        mut_types_to_skip = [\n            'Chain',  # This is the whole protein\n            'Region',  # Those are better described in pfam database\n        ]\n\n    if 'Chain' not in mut_types_to_skip:\n        mut_types_to_skip.append('Chain')\n\n    # Selects the various mutation types in the dataset, except types contained in the above list\n    mut_types = gff_data['mut'].loc[~gff_data['mut'].isin(mut_types_to_skip)].value_counts().index\n\n    x = np.array([]).astype('str')\n    y = np.array([]).astype('str')\n    mutationgroups = np.array([]).astype('str')\n\n    for mut_type in mut_types:\n\n        # Selects the start and end protein coordinates of the mutation\n        data_coord = gff_data[gff_data.mut == mut_type][[start, stop]]\n\n        # Separate the single and multi-site coordinates\n        single_sites = data_coord.loc[data_coord[start] == data_coord[stop]]\n        multi_sites = data_coord.loc[data_coord[start] != data_coord[stop]]\n\n        # Joins the start and end coordinates into one string\n        multi_sites['sep'] = \"-\"\n        multi_sites[start] = \\\n            multi_sites[start].map(str) \\\n            + multi_sites['sep'] \\\n            + multi_sites[stop].map(str)\n\n        # Merge the single and multi-site coordinates into one column and count the occurrences\n        sorted_data = single_sites[start].append(multi_sites[start]).value_counts()\n        n = (len(sorted_data.index))\n\n        x = np.append(x, np.array(sorted_data.index).astype('str'))\n        y = np.append(y, np.array(sorted_data.values).astype('str'))\n        mutationgroups = np.append(mutationgroups, np.repeat(mut_type, n))\n\n    formatted_data = dict(\n        x=x.tolist(),\n        y=y.tolist(),\n        mutationGroups=mutationgroups.tolist(),\n        domains=[],\n    )\n    jsonschema.validate(formatted_data, MUT_DATA_SCHEMA)\n    return formatted_data\n\n\n
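# (illustrative sketch added by the editor, not in the original source; it\n# assumes pandas imported as pd) a minimal gff_data for the parser above:\n#     pd.DataFrame({'mut': ['Mutagenesis', 'Mutagenesis'],\n#                   'start': [10, 25], 'end': [10, 30]})\n# yields x = ['10', '25-30'], y = ['1', '1'] and two 'Mutagenesis' group\n# entries (the ordering of x may vary)\n\n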
def parse_protein_domains_data(domain_data):\n    \"\"\"take a json object loaded from a local file or from the PFAM database and\n    format it for the app.\n    :param (dict) domain_data: json object. It needs to have a structure with a\n                               'regions' field under which 'text', 'start' and 'end'\n                               fields must be present as well.\n    :return: JSON-like structure with protein domain information formatted for dash_bio\n             needle plot component\n    \"\"\"\n    region_key = 'regions'\n    region_name_key = 'text'\n    region_start_key = 'start'\n    region_stop_key = 'end'\n\n    formatted_data = []\n\n    try:\n        jsonschema.validate(domain_data, PFAM_DOM_SCHEMA)\n        is_pfam = True\n        domain_data = domain_data[0]\n        for region in domain_data[region_key]:\n            formatted_data.append(\n                dict(\n                    name=\"%s\" % region[region_name_key],\n                    coord=\"%i-%i\" % (region[region_start_key], region[region_stop_key])\n                )\n            )\n\n    except jsonschema.exceptions.ValidationError:\n        is_pfam = False\n\n    if not is_pfam:\n        try:\n            jsonschema.validate(domain_data, PROT_DOM_SCHEMA)\n            formatted_data = domain_data\n        except jsonschema.exceptions.ValidationError:\n            print(\"Your .json file did not match the schema from PFAM or needleplot domain data\")\n\n    return formatted_data\n\n\ndef parse_mutation_data(mutation_data):\n    \"\"\"take a json object and extract the mutation data based on the schema EMPTY_MUT_DATA\n    :param (dict) mutation_data:\n    :return: formatted mutation data for the dash_bio.NeedlePlot component\n    \"\"\"\n    data = copy.deepcopy(EMPTY_MUT_DATA)\n    jsonschema.validate(mutation_data, MUT_DATA_SCHEMA)\n    for k in data:\n        data[k] = mutation_data[k]\n\n    return data\n\n\ndef load_protein_domains(accession=None, json_fname=None):\n    \"\"\"take a json file from a local file or from the PFAM database and\n    format it for the app.\n    :param (str) json_fname: name of a JSON file. This JSON file needs to\n                             have a structure with a 'regions' field under\n                             which 'text', 'start' and 'end' fields must\n                             be present as well.\n    :param (str) accession: the mutation accession number\n    :return: JSON structure with protein domain information formatted for dash_bio\n             needle plot component\n\n    In case both arguments have non-None values, the data from the PFAM website will\n    be chosen over the one from the JSON file.\n    \"\"\"\n    if json_fname is not None:\n        with open(json_fname, encoding='utf-8') as f:\n            domain_data = json.load(f)\n\n    if accession is not None:\n        domain_data = pfam_domain_parser(accession)\n\n    return parse_protein_domains_data(domain_data)\n\n\ndef load_mutation_data(json_fname=None):\n    \"\"\"load a JSON file and extract the mutation data based on the schema EMPTY_MUT_DATA\n    :param (str) json_fname: name of a JSON file. 
This JSON file needs to\n have a structure like EMPTY_MUT_DATA\n :return: formatted mutation data for the dash_bio.NeedlePlot component\n \"\"\"\n if json_fname is not None:\n with open(json_fname, encoding='utf-8') as f:\n mutation_data = json.load(f)\n\n return parse_mutation_data(mutation_data)\n\n\ndef decode_dcc_upload_contents(contents, encoding='utf-8'):\n \"\"\" decodes the content returned by a dcc.Upload component's callback\n :param contents: a string with a comma separating the content type and the\n encoded content\n :param encoding: the encoding convention of the encoded content\n :return: decoded string\n \"\"\"\n decoded = \"\"\n if contents is not None:\n _, content_string = contents.split(',')\n decoded = base64.b64decode(content_string)\n decoded = decoded.decode(encoding)\n return decoded\n\n\ndef parse_mutation_upload_file(contents, fname):\n \"\"\"\n :param (str) contents: returned by a dcc.Upload 'contents' prop\n :param (str) fname: the filename associated with the dcc.Upload component\n :return: formatted mutation data for the dash_bio.NeedlePlot component\n \"\"\"\n data = copy.deepcopy(EMPTY_MUT_DATA)\n upload_data = decode_dcc_upload_contents(contents)\n\n if upload_data:\n if fname.endswith('json'):\n # Assume that the user uploaded a json file\n json_data = json.loads(upload_data)\n data = parse_mutation_data(json_data)\n\n return data\n\n\ndef parse_domain_upload_file(contents, fname):\n \"\"\"\n :param (str) contents: returned by a dcc.Upload 'contents' prop\n :param (str) fname: the filename associated with the dcc.Upload component\n :return: formatted protein domain data for the dash_bio.NeedlePlot component\n \"\"\"\n data = []\n upload_data = decode_dcc_upload_contents(contents)\n\n if upload_data:\n if fname.endswith('json'):\n # Assumes that the user uploaded a json file\n json_data = json.loads(upload_data)\n data = parse_protein_domains_data(json_data)\n\n return data\n","repo_name":"plotly/dash-bio","sub_path":"dash_bio/utils/mutation_data_parser.py","file_name":"mutation_data_parser.py","file_ext":"py","file_size_in_byte":9105,"program_lang":"python","lang":"en","doc_type":"code","stars":493,"dataset":"github-code","pt":"46"} +{"seq_id":"73585584138","text":"import json\nimport string\nfrom bs4 import BeautifulSoup\nimport re\nfrom nltk.stem import PorterStemmer\nimport glob\nfrom contextlib import ExitStack\nimport subprocess\n\nclass Index():\n def __init__(self):\n self.doc_id = {} # associate each url with an id, e.g. {\"https://ics.uci.edu\": 1, \"https://reddit.com\": 2}\n self.current_id = 1 # increment each time after an id is associated with a document\n self.token_posting = {} # associate each token with the document where it appears, e.g. {\"anteater\": [(1,3),(5,2)], \"zot\": [(1,4)]}\n self.tokens = []\n self.file_num = 0\n self.occurrences = {}\n\n # this function expects the name of a file as a string. it will attempt to open the file and \n # use the json library to extract the content attribute from the json file. Lastly, it will\n # send the content of the file as a string to the tokenize function to have it return the list \n # of tokens. 
the list returned from tokenize is immediately returned by this function as well.\n # Additionally, it will also call assign_ID()\n def extract_content(self, file: str) -> list:\n try:\n # read from a json file\n with open(file, 'r') as f:\n # extract content from json files\n data = json.load(f)\n except: \n print(\"Could not open JSON file..!\")\n \n # assign url to an id\n self.assign_ID(data['url'])\n\n # return a list of words including stop words\n return self.tokenize(data['content'])\n\n # This function is called by extract_content() only. It will assign the url to a unique id.\n # The key/value pair is then added to doc_id dictionary.\n # example, {\"https://ics.uci.edu\": 1}\n def assign_ID(self, url: str):\n # checks to make sure that the url is not in the dictionary, if it is do nothing\n # if it is not add it into the dictionary.\n if url not in self.doc_id:\n\n # Updates the dictionary with the url and assigns it an id\n self.doc_id.update({self.current_id: url})\n\n # Updates the current id\n self.current_id = self.current_id + 1 \n \n # this function uses BeautifulSoup to parse the content attribute of the JSON file.\n # this function will return a 2D list of tokens; element 0 will contain a list of phrases\n # that are considered important text and element 1 will contain individual words found\n # within less important tags, such as
<p> and <li>\n    def tokenize(self, content: str) -> list:\n        tokens = []\n        self.occurrences = {}  # reset the occurrences dictionary\n\n        # create a BS object to parse content attribute from JSON file\n        soup = BeautifulSoup(content, \"html.parser\")\n\n        # the different tags we will use to parse text from each page's contents\n        important_tags = ['meta', 'b', 'strong', 'header', 'title', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']\n        relevant_tags = ['p', 'li']\n        \n        # collect the token lists returned by parse_tags\n        tokens.append(self.parse_tags(soup, important_tags))\n        tokens.append(self.parse_tags(soup, relevant_tags))\n\n        # print(self.occurrences)\n        return tokens\n\n    # create posting for the token\n    # example, {\"anteater\": [(1,3),(5,2)], \"zot\": [(3,6)]}\n    def create_posting(self, token_list: list):\n        ps = PorterStemmer()\n        id = self.current_id - 1  # subtracting 1 is needed to get the correct document id, since current_id is incremented by 1 in assign_ID before this function is called\n\n        for i, l in enumerate(token_list):  # for each list\n            for token in l:  # for each token\n                # for important words\n                if i == 0:\n                    # split the tokens in the important words phrase\n                    tokens = token.split() \n                    \n                    # stem each of the individual tokens\n                    for t in tokens:\n                        t = ps.stem(t)\n                        \n                        # update occurrences for new tokens\n                        if t in self.occurrences.keys():\n                            self.occurrences[t] += 1\n                        else:\n                            self.occurrences[t] = 1\n\n                        # if token is already in the posting\n                        if t in self.token_posting.keys():\n                            for i in range(0, len(self.token_posting[t]), 2):\n                                if self.token_posting[t][i] == id:\n                                    self.token_posting[t][i+1] += self.occurrences[t] + 10 \n                                    break\n                            else:\n                                self.token_posting[t].extend([ id, self.occurrences[t]+10 ])\n                                # self.token_posting[t].append(tuple([id, self.occurrences[t]+10 ]))\n                        else:\n                            self.token_posting[t] = [ id, self.occurrences[t]+10 ]\n                # for non-important words\n                else:\n                    # if token is already in the posting\n                    if token in self.token_posting.keys():\n                        for i in range(0, len(self.token_posting[token]), 2):\n                            if self.token_posting[token][i] == id:\n                                self.token_posting[token][i+1] += self.occurrences[token] \n                                break\n                        else:\n                            self.token_posting[token].extend([ id, self.occurrences[token] ])\n                            # self.token_posting[token].append(tuple([id, self.occurrences[token]]))\n                    else:\n                        self.token_posting[token] = [ id, self.occurrences[token] ]\n\n    # This function creates the inverted index file and writes it to disk from memory.\n    # It will also empty the memory before the next iteration is called.\n    def create_index(self) -> str:\n        # path name\n        tName = './indexes/index'\n\n        # file name\n        fName = '%s%d.txt' % (tName, self.file_num)\n\n        # open file\n        with open(fName, 'w', encoding='utf-8') as file:\n            for token in self.token_posting.keys():\n                file.write(token + '\\t')  # print the key\n                for i, item in enumerate(self.token_posting[token]):\n                    if i % 2 == 0:\n                        file.write('(' + str(item) + ',')\n                    else:\n                        file.write(str(item) + ') ')  # posting print format \n                file.write('\\n')  # write new line, final result is: 'token' (1,4)\n        \n        # increment file number\n        self.file_num += 1\n\n        # empty the posting memory before the next iteration\n        self.token_posting = {}\n\n        # return the file name for sorting of file\n        return fName\n\n    
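# (illustrative note added by the editor, not in the original class) a line\n    # in a partial index file written above looks like:\n    #     anteater\\t(1,13) (5,2) \n    # i.e. the token, a tab, then (doc_id, weighted frequency) pairs\n\n    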
# This function parses the content section of the json file and returns\n    # the tokens that are found\n    def parse_tags(self, soup: BeautifulSoup, tag_list: list) -> list:\n        tokens = []\n\n        for tag in tag_list:\n            # find all of the following tags\n            results = soup.find_all(tag)\n            \n            # parse each result \n            for result in results:\n\n                # this list will temporarily store the tokens found in the current result from the\n                # soup.find_all function call to be checked against a frequency map and added to\n                # a tokens list\n                temp_tokens = []\n\n                if tag == 'meta':\n                    # filter out only the important meta tags such as the page's description and author(s)\n                    if 'name' in result.attrs.keys() and (result.attrs['name'] == 'description' or result.attrs['name'] == 'author'):\n                        if 'content' in result.attrs.keys():\n                            temp_tokens.append(result.attrs['content'])\n                # split normal tags into separate words\n                elif tag == 'p' or tag == 'li':\n                    temp_tokens = result.text.split()\n                # treat the entire \"important text\" phrase as a token\n                else:\n                    temp_tokens.append(result.text)\n\n                # perform cleanup on our tokens\n                temp_tokens = self.token_clean_up(temp_tokens)\n                ps = PorterStemmer()  # imported stemmer to let occurrences contain stemmed tokens\n                stem_tokens = [ps.stem(t) for t in temp_tokens]\n                for t in stem_tokens:\n                    # either add a new token to the list, or increment its counter\n                    if t in self.occurrences.keys():\n                        self.occurrences[t] += 1\n                    else:\n                        self.occurrences[t] = 1\n                        tokens.append(t)\n\n        return tokens \n\n    # This function will aid in clean-up of tokens by removing any non-alphanumeric characters\n    def token_clean_up(self, tokens):\n        for i, text in enumerate(tokens):\n            # first combine any contractions by removing apostrophes\n            p = re.compile('[’\\']')\n            tokens[i] = p.sub('', tokens[i])\n\n            # then replace any character that isn't a number or letter with a space\n            p = re.compile('[^a-zA-Z0-9]')\n            tokens[i] = p.sub(' ', tokens[i])\n            \n            p = re.compile(' +')\n            # lastly, remove any remaining extra spaces\n            tokens[i] = p.sub(' ', tokens[i])\n\n            # remove any leading and trailing spaces\n            tokens[i] = tokens[i].strip()\n\n        # delete empty tokens\n        tokens = list(filter(None, tokens))\n\n        return tokens\n\n    # this function will locate every partial index and then combine them all to create\n    # a single index with all of the tokens sorted in alphabetical order first, and each\n    # token's doc_ids sorted in ascending order\n    def merge_partial_indexes(self):\n        # find all partial indexes within the indexes directory\n        #partial_indexes = glob.glob('indexes/index*.txt')\n        partial_indexes = glob.glob('indexes/index*.txt')\n\n        # output file to write to\n        f_output = open(\"index.txt\", \"w\")\n        \n        # open an input buffer for each partial index\n        with ExitStack() as stack:\n            files = [stack.enter_context(open(fname)) for fname in partial_indexes]\n\n            lines = []\n            done = True\n\n            # read a line from each open file\n            for file in files:\n                lines.append(file.readline())\n\n            # check if any of the lines have text to parse\n            for line in lines:\n                # if at least one line has text, then we must not be done\n                if line:\n                    done = False\n                    break\n                else:\n                    done = True\n\n            # while at least one file still has a line to read\n            while (not done):\n                first_index = 0\n                while first_index < len(lines) and not lines[first_index]:\n                    first_index += 1\n                \n                # determine which line's token comes first (alphabetically)\n                for i, line in enumerate(lines):\n                    # always compare the current line with the previous first line (alphabetically)\n                    if lines[i] and lines[i] < lines[first_index]:\n                        first_index = i\n\n                same_line_indexes = []\n                first_token = ''\n\n                # get just the token of the leading line\n                for i, char in enumerate(lines[first_index]):\n                    if char == '(':\n                        first_token = lines[first_index][0:i-1]\n                        break\n\n                # check if any other line contains the same token\n                for i, line in enumerate(lines):\n                    # 
ignore the smallest\n                    # if i != first_index:\n                    # get just the token of the current line\n                    for j, char in enumerate(lines[i]):\n                        if char == '(':\n                            token = lines[i][0:j-1]\n                            break\n                    else:\n                        token = ''\n                    \n                    if token == first_token:\n                        same_line_indexes.append(i)\n\n                # if there is at least one matching token\n                if len(same_line_indexes) > 1:\n\n                    doc_ids = []\n                    # parse each token's doc_ids/frequencies\n                    for index in same_line_indexes:\n                        doc_id, frequency = self.parse_line(lines[index])\n                        for i, _ in enumerate(doc_id):\n                            doc_ids.append((doc_id[i], frequency[i]))\n\n                    # sort the doc_ids in ascending order\n                    doc_ids = sorted(doc_ids, key = lambda X: int(X[0]))\n                    \n                    f_output.write(token + '\\t')\n                    i = 0\n\n                    # these nested loops use a 2 pointer approach to find all matching\n                    # doc ids and sum their frequencies for merging into a single pair\n                    while i < len(doc_ids):\n                        sum = 0\n                        j = i\n                        while (j < len(doc_ids) and doc_ids[i][0] == doc_ids[j][0]):\n                            sum += int(doc_ids[j][1])\n                            j += 1\n                        f_output.write('(' + str(doc_ids[i][0]) + ',' + str(sum) + ') ')\n                        i = j\n                    \n                    f_output.write('\\n')\n\n                    # empty every line from this section to remove it from next iteration\n                    for i in same_line_indexes:\n                        lines[i] = ''\n                else:\n                    # write the line that comes first to file\n                    f_output.write(lines[first_index])\n\n                    # empty the current line to remove it from next iteration\n                    lines[first_index] = ''\n\n                # read a line from each open file\n                for i, file in enumerate(files):\n                    if not lines[i]:\n                        lines[i] = file.readline()\n\n                # check if any of the lines have text to parse\n                for line in lines:\n                    # if at least one line has text, then we must not be done\n                    if line:\n                        done = False\n                        break\n                    else:\n                        done = True\n\n        # delete all partial indexes from the indexes directory\n        bash_command = 'rm indexes/*'\n        process = subprocess.Popen(bash_command, shell=True, stdout=subprocess.PIPE)\n        output, error = process.communicate()\n\n        f_output.close()\n\n        # move the new complete index from the base directory (of this project) into indexes/\n        bash_command = 'mv index.txt indexes/'\n        process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE)\n        output, error = process.communicate()\n\n    # this function will parse the line of the file to get the\n    # doc ids from the line\n    def parse_line(self, line: str):\n        tokens = line.split()\n        doc_id = []\n        token_occurrences = []\n        # Gets the numbers of the ids that relate to the token\n        \n        start_of_doc_ids = 0\n\n        # find where the pairs begin\n        for i, token in enumerate(tokens):\n            if token[0] == '(':\n                start_of_doc_ids = i\n                break\n\n        # trim the tokens to start at the document ids\n        tokens = tokens[start_of_doc_ids:]\n\n        # for each pair of (document_id,occurrences)\n        for pair in tokens:\n            # check each char of the pair\n            for i, char in enumerate(pair):\n                # find the comma\n                if char == ',':\n                    # append the left side of the comma to the list of doc_ids\n                    doc_id.append(pair[1:i])\n                    # the right side to the list of occurrences\n                    token_occurrences.append(pair[i+1:-1])\n                    break\n\n        return doc_id, token_occurrences\n\n    # This function will return the len of how many doc_ids were found.\n    def get_num_of_doc_ids(self):\n        return len(self.doc_id)","repo_name":"CitoC/Search-Engine","sub_path":"Indexer.py","file_name":"Indexer.py","file_ext":"py","file_size_in_byte":16634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"11365148922","text":"str_ = str(input().strip())\n\nstr_dict = dict()\n\nkeys = 0\nfor s in str_:\n    if s not in str_dict:\n        str_dict[s] 
= str_.count(s)\n keys += 1\n\nodd, even = 0, 0\nfor v in str_dict.values():\n if v % 2 == 0:\n even += 1\n else:\n odd += 1\n\nif (even == keys) or (even == keys - 1):\n print('YES')\nelse:\n print('NO')\n","repo_name":"roommen/hacker_rank","sub_path":"core_cs/algorithms/strings/python/game_of_thrones_1.py","file_name":"game_of_thrones_1.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"46"} +{"seq_id":"39633503590","text":"\"\"\"\nBinary heap implementation\n\"\"\"\n\n\nclass BinaryHeap:\n def __init__(self):\n self.min_heap = []\n self.last_index = -1\n\n def push(self, num):\n self.last_index += 1\n self.min_heap.append(num)\n self._sift_up(self.last_index)\n\n def pop(self):\n if self.length() == 0:\n raise Exception(\"Heap is empty\")\n self.min_heap[0], self.min_heap[self.last_index] = (\n self.min_heap[self.last_index],\n self.min_heap[0],\n )\n elt = self.min_heap[self.last_index]\n self.last_index -= 1\n if self.length() > 1:\n self._sift_down(0)\n\n return elt\n\n def parent(self, index, default_val):\n if index == 0:\n return None, default_val\n parent_index = (index - 1) // 2\n return parent_index, self.min_heap[parent_index]\n\n def left_child(self, index, default_val):\n new_index = 2 * index + 1\n if new_index <= self.last_index:\n return new_index, self.min_heap[new_index]\n return None, default_val\n\n def right_child(self, index, default_val):\n new_index = 2 * index + 2\n if new_index <= self.last_index:\n return new_index, self.min_heap[new_index]\n return None, default_val\n\n def _sift_up(self, index):\n while True:\n parent_idx, parent_val = self.parent(index, self.min_heap[index])\n if parent_val <= self.min_heap[index]:\n break\n self.min_heap[index], self.min_heap[parent_idx] = (\n self.min_heap[parent_idx],\n self.min_heap[index],\n )\n\n index = parent_idx\n\n def _sift_down(self, index):\n val = self.min_heap[index]\n while True:\n left_index, left_val = self.left_child(index, val)\n right_index, right_val = self.right_child(index, val)\n if self.min_heap[index] <= left_val and self.min_heap[index] <= right_val:\n break\n index_to_be_changed_with = -1\n # print(left_val, right_val)\n if left_val < right_val:\n index_to_be_changed_with = left_index\n else:\n index_to_be_changed_with = right_index\n self.min_heap[index], self.min_heap[index_to_be_changed_with] = (\n self.min_heap[index_to_be_changed_with],\n self.min_heap[index],\n )\n index = index_to_be_changed_with\n\n def length(self):\n return self.last_index + 1\n\n\nheap = BinaryHeap()\nheap.push(10)\nheap.push(5)\nheap.push(12)\nheap.push(7)\nheap.push(9)\nheap.push(8)\nheap.push(7)\nheap.push(8)\nprint(\"===============================\")\nwhile heap.length() > 0:\n print(heap.pop(), end=\" \")\n # print(heap.min_heap)\n\nprint()\n","repo_name":"ayush-garg341/python","sub_path":"data_structures/daily_challenges/binary_heap.py","file_name":"binary_heap.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28749599348","text":"from collections import deque\ndef solution(nums, k):\n if not nums:\n return []\n n = len(nums)\n memo = deque()\n sol = []\n for i,u in enumerate(nums[:k]):\n while memo:\n if memo[-1][0] <= nums[i]:\n memo.pop()\n else:\n break\n memo.append((u, i))\n sol.append(memo[0][0])\n for i in range(k, n):\n if memo[0][1] == i-k:\n memo.popleft()\n while memo:\n if memo[-1][0] <= nums[i]:\n 
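# (editor's note, added) a tail value <= nums[i] can never be the window maximum again, so it is evicted\n                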
memo.pop()\n            else:\n                break\n        memo.append((nums[i], i))\n        sol.append(memo[0][0])\n    return sol\n\nif __name__ == '__main__':\n    a = [9,10,9,-7,-4,-8,2,-6]\n    k = 5\n    print(solution(a, k))","repo_name":"winlp4ever/algos","sub_path":"sliding-window-maximum.py","file_name":"sliding-window-maximum.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"13964033326","text":"import copy\r\nimport torch\r\nfrom torch import nn\r\nfrom FLAlgorithms.servers.FW_Solver import MinNormSolver\r\n\r\n\r\n\r\ndef FedAvg_MOM(w):\r\n\r\n    com_grad = copy.deepcopy(w[0])\r\n\r\n    for k in com_grad.keys():# weights of each layer\r\n        W_list = []\r\n        for i in range(len(w)):\r\n            W_list.append(list(w[i][k]))\r\n        alpha, _ = MinNormSolver.find_min_norm_element(W_list)\r\n        temp = 0\r\n        for i in range(len(w)):\r\n            wl = torch.stack(W_list[i],dim=0)\r\n            temp = temp + alpha[i] *wl\r\n        com_grad[k] = temp\r\n    return com_grad","repo_name":"adam4096/Code","sub_path":"PFML_code/FLAlgorithms/servers/MGDA.py","file_name":"MGDA.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"23693101503","text":"t1 = input().replace(\":\", \"\")\nt2 = input().replace(\":\", \"\")\n\n\ndef get_max_min_time(t):\n    h = t[:2]\n    m = t[2:]\n    if \"?\" not in m:\n        m_max = m_min = int(m[0]) * 10 + int(m[1])\n    elif m[0] == \"?\" and m[1] != \"?\":\n        m_max = 5 * 10 + int(m[1])\n        m_min = int(m[1])\n    elif m[1] == \"?\" and m[0] != \"?\":\n        m_max = int(m[0]) * 10+9\n        m_min = int(m[0]) * 10\n    else:\n        m_max = 59\n        m_min = 0\n\n    if \"?\" not in h:\n        h_max = h_min = int(h[0]) * 10 + int(h[1])\n    elif h[0] == \"?\" and h[1] != \"?\":\n        h_max = int(h[1]) + 20 if int(h[1]) < 4 else int(h[1]) + 10\n        h_min = int(h[1])\n    elif h[1] == \"?\" and h[0] != \"?\":\n        temp = int(h[0]) * 10\n        h_max = temp + 9\n        h_min = temp\n    else:\n        h_max = 23\n        h_min = 0\n    return h_max, h_min, m_max, m_min,\n\n\ndef get_t1_t2(time1, time2):\n    max_ = (time2[0] - time1[1]) * 60 + time2[2] - time1[3]\n    min_ = (time2[1] - time1[0]) * 60 + time2[3] - time1[2]\n\n    return min_, max_\n\n\nt1 = get_max_min_time(t1)\nt2 = get_max_min_time(t2)\n\nres = map(str, get_t1_t2(t1, t2))\nprint(\" \".join(res))\n","repo_name":"syx9527/L1_Python","sub_path":"ranko的手表.py","file_name":"ranko的手表.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"43222506884","text":"from opencensus.tags.validation import is_valid_tag_value\n\n_TAG_VALUE_ERROR = \\\n    'tag value must not be longer than 255 characters ' \\\n    'and of ascii values between 32 - 126'\n\n\nclass TagValue(str):\n    \"\"\"The value of a tag\"\"\"\n\n    def __new__(cls, value):\n        \"\"\"Create and return a new tag value\n\n        :type value: str\n        :param value: A string representing the value of a key in a tag\n        :return: TagValue\n        \"\"\"\n        if not isinstance(value, cls):\n            if not is_valid_tag_value(value):\n                raise ValueError(_TAG_VALUE_ERROR)\n        return super(TagValue, cls).__new__(cls, value)\n","repo_name":"census-instrumentation/opencensus-python","sub_path":"opencensus/tags/tag_value.py","file_name":"tag_value.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":659,"dataset":"github-code","pt":"46"} +{"seq_id":"5858280451","text":"from __future__ import annotations\n\nimport itertools\nimport typing\nfrom typing import Any, 
Iterable\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_simple.exceptions import IncompatibleShapesError\nfrom torch_simple.typedefs import Side\n\n\ndef _check_pad_truncate_args_valid(\n x: Tensor,\n shape: tuple[int, ...],\n side: Side | Iterable[Side],\n) -> None:\n sides = typing.get_args(Side)\n\n if isinstance(side, str):\n if side not in sides:\n raise ValueError(f\"side must be one of {sides}, got {side}\")\n else:\n side = tuple(side)\n if any(s not in sides for s in side):\n raise ValueError(f\"all values in side must be one of {sides}, got {side}\")\n\n if len(side) != len(shape):\n raise ValueError(f\"side and shape must have same length, got {side} and {shape}\")\n\n if len(x.shape) != len(shape):\n raise IncompatibleShapesError(\n \"input and output shapes must have same number of dimensions, \"\n f\"got {x.shape} and {shape}\",\n [x],\n )\n\n\ndef pad_to_shape(\n x: Tensor,\n shape: tuple[int, ...],\n align: Side | Iterable[Side] = \"start\",\n strict: bool = True,\n **kwargs: Any,\n) -> Tensor:\n \"\"\"\n Pads x to match a desired shape.\n\n If any dimension of x is larger than the corresponding dimension of shape, either:\n 1) if strict is True, raises an error\n 2) if strict is False, the output will preserve the original (larger) dimension\n\n >>> x = torch.ones((2, 3), dtype=int)\n >>> y = pad_to_shape(x, (4, 6), align=\"start\")\n >>> y.numpy()\n array([[1, 1, 1, 0, 0, 0],\n [1, 1, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]])\n\n >>> x = torch.ones((2, 3), dtype=int)\n >>> y = pad_to_shape(x, (4, 6), align=(\"start\", \"end\"))\n >>> y.numpy()\n array([[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]])\n\n >>> x = torch.ones((2, 3), dtype=int)\n >>> y = pad_to_shape(x, (4, 6), align=\"middle\")\n >>> y.numpy()\n array([[0, 0, 0, 0, 0, 0],\n [0, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 0, 0],\n [0, 0, 0, 0, 0, 0]])\n \"\"\"\n\n _check_pad_truncate_args_valid(x, shape, align)\n\n if strict and any(dim_x > dim_y for dim_x, dim_y in zip(x.shape, shape)):\n raise IncompatibleShapesError(\n f\"Input shape {x.shape} has a dimension larger than \"\n f\"output shape {shape} and raise_if_larger=True\",\n [x],\n )\n\n if isinstance(align, str):\n align = (align,) * len(shape)\n\n padding = []\n for dim_x, dim_y, side in zip(x.shape, shape, align):\n if dim_y > dim_x:\n padding_size = dim_y - dim_x\n if side == \"start\":\n padding.append((0, padding_size))\n elif side == \"end\":\n padding.append((padding_size, 0))\n elif side == \"middle\":\n padding.append((padding_size // 2, padding_size - padding_size // 2))\n else:\n padding.append((0, 0))\n\n output_shape = tuple(max(dim_x, dim_y) for dim_x, dim_y in zip(x.shape, shape))\n padding_spec = tuple(itertools.chain(*reversed(padding)))\n\n padded = torch.nn.functional.pad(x, pad=padding_spec, **kwargs)\n assert padded.shape == output_shape\n return padded\n\n\ndef truncate_to_shape(\n x: Tensor,\n shape: tuple[int, ...],\n keep_side: Side | Iterable[Side] = \"start\",\n strict: bool = True,\n) -> Tensor:\n \"\"\"\n Truncates x to match a desired shape.\n\n If any dimension of x is smaller than the corresponding dimension of shape, either:\n 1) if strict is True, raises an error\n 2) if strict is False, the output will preserve the original (smaller) dimension\n\n >>> x = torch.arange(4*6, dtype=int).reshape(4, 6)\n >>> x.numpy()\n array([[ 0, 1, 2, 3, 4, 5],\n [ 6, 7, 8, 9, 10, 11],\n [12, 13, 14, 15, 16, 17],\n [18, 19, 20, 21, 22, 23]])\n\n >>> y = truncate_to_shape(x, (2, 3), 
keep_side=\"start\")\n >>> y.numpy()\n array([[0, 1, 2],\n [6, 7, 8]])\n\n >>> y = truncate_to_shape(x, (2, 3), keep_side=(\"start\", \"end\"))\n >>> y.numpy()\n array([[ 3, 4, 5],\n [ 9, 10, 11]])\n\n >>> y = truncate_to_shape(x, (2, 3), keep_side=\"middle\")\n >>> y.numpy()\n array([[ 7, 8, 9],\n [13, 14, 15]])\n\n \"\"\"\n\n _check_pad_truncate_args_valid(x, shape, keep_side)\n\n if strict and any(dim_x < dim_y for dim_x, dim_y in zip(x.shape, shape)):\n raise IncompatibleShapesError(\n f\"Input shape {x.shape} has a dimension smaller than \"\n f\"output shape {shape} and raise_if_smaller=True\",\n [x],\n )\n\n if isinstance(keep_side, str):\n keep_side = (keep_side,) * len(shape)\n\n output_shape = tuple(min(dim_x, dim_y) for dim_x, dim_y in zip(x.shape, shape))\n slices = []\n for dim_x, dim_y, side in zip(x.shape, shape, keep_side):\n if dim_y < dim_x:\n if side == \"start\":\n slices.append(slice(0, dim_y))\n elif side == \"end\":\n slices.append(slice(dim_x - dim_y, dim_x))\n elif side == \"middle\":\n slices.append(slice((dim_x - dim_y) // 2, (dim_x + dim_y) // 2))\n else:\n slices.append(slice(0, dim_y))\n\n if len(x.shape) == 0:\n truncated = x\n else:\n truncated = x[slices]\n assert truncated.shape == output_shape\n return truncated\n","repo_name":"prompteus/torch-simple","sub_path":"torch_simple/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":5497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"33451969523","text":"from logging import exception\nfrom ..base._processor import Processor\nfrom typing import Literal\n\n\nclass Eraser2D(Processor):\n\n def __init__(self, direction: Literal['left', 'right', 'same'], value=None, sharedDataId=None, reverse=None, name=None):\n super().__init__(sharedDataId=sharedDataId, reverse=reverse, name=name)\n self.direction = direction\n if value == None:\n self.value = None\n elif isinstance(value, list):\n self.value = value\n else:\n self.value = [value]\n\n def scale(self, data, project, params=None):\n return self.apply(data, project, params)\n\n def apply(self, data, project, params=None):\n try:\n result = data\n value = self.getValue(data)\n if self.direction == 'same' or self.direction == 'left':\n while self.isValueVector(result[0], value):\n result = result[1:]\n if self.direction == 'same' or self.direction == 'right':\n while self.isValueVector(result[-1], value):\n result = result[:-1]\n return result\n except ValueError:\n return data\n\n def isValueVector(self, vector, value):\n for i in vector:\n if i != value:\n return False\n return True\n\n def getValue(self, project):\n if self.value != None:\n return self.value\n value = self.getSharedData(project)['value']\n if isinstance(value, list):\n return value\n else:\n return [value]\n\n def reverse(self):\n if self.reverseProcessor != None:\n return self.reverseProcessor\n from ._autoPadding1D import AutoPadding1D\n return AutoPadding1D(direction=self.direction, value=self.value, sharedDataId=self.sharedDataId)\n\n def saveData(self, dataRecorder) -> None:\n super().saveData(dataRecorder)\n dataRecorder.record('direction', self.direction)\n dataRecorder.record('value', self.value)\n","repo_name":"LuizHenriqueKS/ZAIProject","sub_path":"ZAIProject/processor/_eraser2D.py","file_name":"_eraser2D.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"35831110494","text":"#ПОИСК В 
ШИРИНУ (breadth-first search)\r\nfrom queue import Queue\r\nq = Queue()\r\ng = [[] for i in range(100000)]\r\nd = [0 for i in range(100000)]\r\n\r\nn, m = list(map(int, input().split()))\r\n\r\nfor i in range(m):\r\n    a, b = list(map(int, input().split()))\r\n    g[a].append(b)\r\n    g[b].append(a)\r\n\r\nd[1] = 1\r\nq.put(1)\r\nwhile not q.empty():\r\n    x = q.get()\r\n    for i in g[x]:\r\n        if d[i] == 0:\r\n            d[i] = d[x] + 1\r\n            q.put(i)\r\nprint(d[n] - 1)","repo_name":"Jest220/-","sub_path":"Лабораторная №6/2.1.py","file_name":"2.1.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"44826707214","text":"if __name__ == '__main__':\n    T = int(input())\n    for i in range(T):\n        n = int(input())\n        arr = list(map(int, input().split()))\n        # if n==1:\n        #     print(\"first\")\n        #     break\n        for i in range(4):\n            arr.append(0)\n        evensum=sum(arr[0::2])\n        oddsum=sum(arr[1::2])\n        if oddsum>evensum:\n            print(\"second\")\n        # 6\n        # 1 3 1 5 6 2 4 5 2\n        # e e e e e\n\n        elif arr[0]+sum(arr[3::2])>arr[1]+arr[2]+sum(arr[4::2]):\n            print(\"first\")\n        elif arr[0] + sum(arr[3::2]) < arr[1] + arr[2] + sum(arr[4::2]):\n            print(\"second\")\n        elif evensum>oddsum:\n            print(\"first\")\n        else:\n            print(\"draw\")\n\"\"\" for _ in range(int(input())):\n    n=int(input())\n    l=list(map(int,input().split()))\n    l=sorted(l)\n    r=l[::-1]\n    \n    if(n==1 or n==2):\n        print(\"first\")\n    else:\n        p1=r[1]+sum(r[2::2])\n        p2=sum(l)-p1\n        \n        if(p1>p2):\n            print(\"second\")\n        elif(p2>p1):\n            print(\"first\")\n        else:\n            print(\"draw\") \"\"\"","repo_name":"ramessesii2/CompetitiveProgramming","sub_path":"CodeChef/TOWIN.py","file_name":"TOWIN.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"14426169803","text":"#!/usr/bin/env python3\n\n# Script: Ops 301 Class 09 Ops Challenge Solution\n# Author: Andrew P.\n# Date of latest revision: 03/24/2023\n# Purpose: Code Fellows requires it to pass class.\n\n# Credit for resources and reference goes to Codecademy and itsolutionstuff. 
\n# https://www.codecademy.com/learn/learn-python-3/modules/learn-python3-files/cheatsheet\n# https://www.itsolutionstuff.com/post/how-to-create-multiline-text-file-in-pythonexample.html\n\n# Create an if statement using comparison operators.\na = 1\nb = 2\n\nif a < b:\n    print(\"a is less than b\")\nelif a > b:\n    print(\"a is greater than b\")\nelse:\n    print(\"a is equal to b\")\n\n# Create an if statement using logical conditions.\na = 1 \nb = 1\n\nif a != b:\n    print(\"a is not equal to b\")\nelse:\n    print(\"a is equal to b\")\n\n\n# Create an if statement that has elif and else.\na = 15\nb = 20\n\nif a >= b:\n    print(\"a is greater than or equal to b\")\nelif a < b:\n    print(\"a is less than b\")\nelse:\n    print(\"This statement should not be reached\")\n","repo_name":"Perryandr/Ops-301d6-Ops-challenges","sub_path":"Ops301challenge9.py","file_name":"Ops301challenge9.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"1413206096","text":"import tensorflow as tf\n\n# When evaluating a node, TensorFlow automatically determines the set of nodes it depends on and evaluates those dependencies first\nw = tf.constant(3)\nx = w + 2\ny = x + 5\nz = x * 3\n\nwith tf.Session() as sess:\n    print(y.eval())\n    # To evaluate z here, x and w are computed again: apart from Variable values, TF does not cache other values such as constants\n    # A Variable's lifecycle starts when its initializer runs and ends when the session is closed\n    print(z.eval())\n\n# To evaluate y and z efficiently, without computing w and x twice, we must ask TensorFlow to evaluate y and z in a single graph run\nwith tf.Session() as sess:\n    y_val, z_val = sess.run([y, z])\n    print(y_val)\n    print(z_val)\n\n","repo_name":"Asher-1/AI","sub_path":"ML/Regression/linearRegressionByTensorFlow/06_lifecycle.py","file_name":"06_lifecycle.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"zh","doc_type":"code","stars":6,"dataset":"github-code","pt":"46"} +{"seq_id":"21164020079","text":"import asyncio\n\nfrom hbmqtt.mqtt.packet import MQTTPacket, MQTTFixedHeader, UNSUBSCRIBE, PacketIdVariableHeader, MQTTPayload, MQTTVariableHeader\nfrom hbmqtt.errors import HBMQTTException, NoDataException\nfrom hbmqtt.codecs import decode_string, encode_string\n\n\nclass UnubscribePayload(MQTTPayload):\n\n    __slots__ = ('topics',)\n\n    def __init__(self, topics=[]):\n        super().__init__()\n        self.topics = topics\n\n    def to_bytes(self, fixed_header: MQTTFixedHeader, variable_header: MQTTVariableHeader):\n        out = b''\n        for topic in self.topics:\n            out += encode_string(topic)\n        return out\n\n    @classmethod\n    @asyncio.coroutine\n    def from_stream(cls, reader: asyncio.StreamReader, fixed_header: MQTTFixedHeader,\n                    variable_header: MQTTVariableHeader):\n        topics = []\n        payload_length = fixed_header.remaining_length - variable_header.bytes_length\n        read_bytes = 0\n        while read_bytes < payload_length:\n            try:\n                topic = yield from decode_string(reader)\n                topics.append(topic)\n                read_bytes += 2 + len(topic.encode('utf-8'))\n            except NoDataException:\n                break\n        return cls(topics)\n\n\nclass UnsubscribePacket(MQTTPacket):\n    VARIABLE_HEADER = PacketIdVariableHeader\n    PAYLOAD = UnubscribePayload\n\n    def __init__(self, fixed: MQTTFixedHeader=None, variable_header: PacketIdVariableHeader=None, payload=None):\n        if fixed is None:\n            header = MQTTFixedHeader(UNSUBSCRIBE, 0x02)  # [MQTT-3.10.1-1]\n        else:\n            if fixed.packet_type is not UNSUBSCRIBE:\n                raise HBMQTTException(\"Invalid fixed packet type %s for UnsubscribePacket init\" % fixed.packet_type)\n            header = fixed\n\n        super().__init__(header)\n        self.variable_header = variable_header\n        self.payload = payload\n\n    @classmethod\n    def build(cls, topics, packet_id):\n        v_header 
= PacketIdVariableHeader(packet_id)\n        payload = UnubscribePayload(topics)\n        return UnsubscribePacket(variable_header=v_header, payload=payload)\n","repo_name":"beerfactory/hbmqtt","sub_path":"hbmqtt/mqtt/unsubscribe.py","file_name":"unsubscribe.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","stars":790,"dataset":"github-code","pt":"46"} +{"seq_id":"42333808897","text":"from xautodl.models.cell_infers.tiny_network import TinyNetwork\nfrom torch.nn import Module\nfrom typing import Iterable, Callable, Tuple, List, Union\nimport numpy as np\nimport pandas as pd\nfrom copy import deepcopy as copy\nfrom .nats_interface import NATSInterface\nfrom .utils import *\nfrom itertools import chain\n\n\nNATSPATH = str(get_project_root()) + \"/archive/NATS-tss-v1_0-3ffb9-simple/\"\n\nclass Individual(): \n    def __init__(\n        self, \n        net:TinyNetwork, \n        genotype:list,\n        index:int, \n        age:int=0,\n        dataset:str=\"cifar10\", \n        searchspace_interface:object=None):\n        \n        self.scores = {} # TODO: MOVE EACH INDIVIDUAL SCORE INSIDE THIS DICTIONARY\n        self.net = net\n        self._genotype = genotype\n        self.index=index\n        self.age = age\n\n        self._fitness = 0\n        self._rank = 0\n        # searchspace interface needed to exploit search space properties\n        if searchspace_interface is None: \n            self.interface = NATSInterface(path=NATSPATH, dataset=dataset)\n        else: \n            self.interface = searchspace_interface\n    \n    def update_net(self):\n        \"\"\"Over-writes net field in light of genotype\"\"\"\n        genotype_arch_str = genotype_to_architecture(self.genotype)\n        self.net, _ = self.interface.query_with_architecture(architecture_string=genotype_arch_str)\n\n    def update_idx(self):\n        \"\"\"Over-writes index field in light of genotype\"\"\"\n        genotype_arch_str = genotype_to_architecture(self.genotype)\n        self.index = self.interface.query_index_by_architecture(architecture_string=genotype_arch_str)\n\n    @property\n    def genotype(self): \n        return self._genotype\n\n    def update_genotype(self, new_genotype:List): \n        \"\"\"Update current genotype with new one. When doing so, also the network field is updated\"\"\"\n        # sanity check on new genotype\n        if not genotype_is_valid(genotype=new_genotype):\n            raise ValueError(f\"genotype {new_genotype} is not a valid replacement for {self.genotype}!\")\n\n        self._genotype = new_genotype\n        self.update_net()\n        self.update_idx()\n\n    @property\n    def fitness(self): \n        return self._fitness\n    \n    def update_fitness(self, metric:Callable, attribute:str=\"net\"): \n        \"\"\"Update the current value of fitness using provided metric\"\"\"\n        self._fitness = metric(getattr(self, attribute))\n    \n    def overwrite_fitness(self, new_fitness:float):\n        \"\"\"Overwrite current value of fitness\"\"\"\n        if isinstance(new_fitness, float) or isinstance(new_fitness, int): \n            self._fitness = new_fitness\n        else: \n            raise ValueError(f\"New fitness value ({new_fitness}) is not a number!\")\n\n    @property\n    def rank(self): \n        return self._rank\n    \n    def update_ranking(self, new_rank:int) -> None: \n        \"\"\"Updates current ranking of considered architecture\"\"\"\n        self._rank = new_rank\n\nclass Genetic: \n    def __init__(\n        self, \n        genome:Iterable[str], \n        strategy:Tuple[str, str]=\"comma\", \n        tournament_size:int=5):\n        \n        self.genome = set(genome) if not isinstance(genome, set) else genome\n        self.strategy = strategy\n        self.tournament_size = tournament_size\n\n    def tournament(self, population:Iterable[Individual]) -> Iterable[Individual]:\n        \"\"\"\n        Return tournament, i.e. a random subset of population of size tournament size. 
\n        Sampling is done without replacement to ensure diversity inside the actual tournament.\n        \"\"\"\n        return np.random.choice(a=population, size=self.tournament_size, replace=False).tolist()\n    \n    def obtain_parents(self, population:Iterable[Individual], n_parents:int=2) -> Iterable[Individual]:\n        \"\"\"Obtain n_parents from population. Parents are defined as the fittest individuals in n_parents tournaments\"\"\"\n        tournament = self.tournament(population = population)\n        # parents are defined as fittest individuals in tournaments\n        parents = sorted(tournament, key = lambda individual: individual.fitness, reverse=True)[:n_parents]\n        return parents\n    \n    def mutate(self, \n               individual:Individual, \n               n_loci:int=1, \n               genes_prob:Union[None, List[float]]=None) -> Individual: \n        \"\"\"Applies mutation to a given individual\"\"\"\n        mutant_genotype = copy(individual.genotype)\n        for _ in range(n_loci): \n            # select a locus in the genotype (that is, where mutation will occur)\n            if genes_prob is None: # uniform probability over all loci\n                mutant_locus = np.random.randint(low=0, high=len(mutant_genotype))\n            else: # custom probability distribution over which locus to mutate\n                mutant_locus = np.random.choice(len(mutant_genotype), p=genes_prob)\n            # mapping the locus to the actual gene that will effectively change\n            mutant_gene = mutant_genotype[mutant_locus]\n            operation, level = mutant_gene.split(\"~\") # splits the gene into operation and level\n            # mutation changes gene, so the current one must be removed from the pool of candidate genes\n            mutations = self.genome.difference([operation])\n            \n            # overwriting the mutant gene with a new one - probability of choosing how to mutate should be selected as well\n            mutant_genotype[mutant_locus] = np.random.choice(a=list(mutations)) + f\"~{level}\"\n\n        mutant_individual = Individual(net=None, genotype=None, index=None)\n        mutant_individual.update_genotype(mutant_genotype)\n\n        return mutant_individual\n    \n    def recombine(self, individuals:Iterable[Individual], P_parent1:float=0.5) -> Individual: \n        \"\"\"Performs recombination of two given `individuals`\"\"\"\n        if len(individuals) != 2: \n            raise ValueError(\"Number of individuals cannot be different from 2!\")\n        \n        individual1, individual2 = individuals\n        recombinant_genotype = [None for _ in range(len(individual1.genotype))]\n        for locus_idx, (gene_1, gene_2) in enumerate(zip(individual1.genotype, individual2.genotype)):\n            # choose genes from parent1 according to P_parent1\n            recombinant_genotype[locus_idx] = gene_1 if np.random.random() <= P_parent1 else gene_2\n\n        recombinant = Individual(net=None, genotype=None, index=None)\n        recombinant.update_genotype(list(recombinant_genotype))\n\n        return recombinant\n\nclass Population: \n    def __init__(self,\n                 space:object,\n                 init_population:Union[bool, Iterable]=True,\n                 n_individuals:int=20,\n                 normalization:str='dynamic'): \n        self.space = space\n        if init_population is True:\n            self._population = generate_population(searchspace_interface=space, n_individuals=n_individuals)\n        else: \n            self._population = init_population\n        \n        self.oldest = None\n        self.worst_n = None\n        self.normalization = normalization.lower()\n        if self.normalization in ['minmax', 'standard']:\n            df_scores = pd.read_csv(f'{str(get_project_root())}/cachedmetrics/{self.space.dataset}_{normalization}.csv')\n            self.scores_dict = {\n                key: value for value, key in enumerate(df_scores.columns)\n            }\n            self.extreme_scores = df_scores.values\n    \n    def __iter__(self): \n        for i in self._population: \n            yield i\n    \n    @property\n    def 
individuals(self):\n        return self._population\n    \n    def update_population(self, new_population:Iterable[Individual]): \n        \"\"\"Overwrites current population with new one stored in `new_population`\"\"\"\n        if all([isinstance(el, Individual) for el in new_population]):\n            del self._population\n            self._population = new_population\n        else:\n            raise ValueError(\"new_population is not an Iterable of `Individual` datatype!\")\n\n    def fittest_n(self, n:int=1): \n        \"\"\"Return first `n` individuals based on fitness value\"\"\"\n        return sorted(self._population, key=lambda individual: individual.fitness, reverse=True)[:n]\n    \n    def update_ranking(self): \n        \"\"\"Updates the ranking in the population in light of fitness value\"\"\"\n        sorted_individuals = sorted(self._population, key=lambda individual: individual.fitness, reverse=True)\n        \n        # ranking in light of individuals \n        for ranking, individual in enumerate(sorted_individuals):\n            individual.update_ranking(new_rank=ranking)\n\n    def update_fitness(self, fitness_function:Callable): \n        \"\"\"Updates the fitness value of individuals in the population\"\"\"\n        for individual in self.individuals: \n            individual.overwrite_fitness(fitness_function(individual))\n    \n    def apply_on_individuals(self, function:Callable)->None: \n        \"\"\"Applies a function to each individual in the population, in place.\n        \n        Args: \n            function (Callable): function to apply on each individual. Must return an object of class Individual.\n        \"\"\"\n        self._population = [function(individual) for individual in self._population]\n\n    def set_extremes(self, score:str):\n        \"\"\"Set the maximal & minimal value in the population for the score 'score' (must be a class attribute)\"\"\"\n        if self.normalization == 'dynamic':\n            # accessing the score of each individual\n            scores = [getattr(ind, score) for ind in self.individuals]\n            min_value = min(scores)\n            max_value = max(scores)\n        elif self.normalization == 'minmax':\n            # extreme_scores is a 2x`number_of_scores`\n            min_value, max_value = self.extreme_scores[:, self.scores_dict[score]]\n        elif self.normalization == 'standard':\n            # extreme_scores is a 2x`number_of_scores`\n            mean_value, std_value = self.extreme_scores[:, self.scores_dict[score]]\n\n        if self.normalization in ['minmax', 'dynamic']:\n            setattr(self, f\"max_{score}\", max_value)\n            setattr(self, f\"min_{score}\", min_value)\n        else:\n            setattr(self, f\"mean_{score}\", mean_value)\n            setattr(self, f\"std_{score}\", std_value)\n\n    def age(self): \n        \"\"\"Embeds ageing into the process\"\"\"\n        def individuals_ageing(individual): \n            individual.age += 1\n            return individual\n\n        self.apply_on_individuals(function=individuals_ageing)\n    \n    def add_to_population(self, new_individuals:Iterable[Individual]): \n        \"\"\"Add new_individuals to population\"\"\"\n        # TODO: add a block that if new_individuals are over the current extremes resets those\n        self._population = list(chain(self.individuals, new_individuals))\n    \n    def remove_from_population(self, attribute:str=\"fitness\", n:int=1, ascending:bool=True): \n        \"\"\"Remove first/last `n` elements from sorted population in `ascending/descending`\n        order based on the value of `attribute`\"\"\"\n        # TODO: Implement a removal from population strategy that is O(n) (remove min individual) \n        # - currently sorting is obviously not!\n        if not 
all([hasattr(el, attribute) for el in self.individuals]):\n            raise ValueError(f\"Attribute '{attribute}' is not an attribute of all the individuals!\")\n        # sort the population based on the value of attribute\n        sorted_population = sorted(self.individuals, key=lambda ind: getattr(ind, attribute), reverse=False if ascending else True)\n        \n        # new population is old population minus the `n` worst individuals with respect to `attribute`\n        self.update_population(sorted_population[n:])\n\n    def update_oldest(self, candidate:Individual): \n        \"\"\"Updates oldest individual in the population\"\"\"\n        if candidate.age >= self.oldest.age: \n            self.oldest = candidate\n        else: \n            pass\n\n    def update_worst_n(self, candidate:Individual, attribute:str=\"fitness\", n:int=2): \n        \"\"\"Updates worst_n elements in the population\"\"\"\n        if hasattr(candidate, attribute): \n            if any([getattr(candidate, attribute) < getattr(worst, attribute) for worst in self.worst_n]):\n                # candidate is worse than one of the worst individuals\n                bad_individuals = self.worst_n + [candidate]\n                # sort in increasing values of fitness\n                bad_sorted = sorted(bad_individuals, key=lambda ind: getattr(ind, attribute))\n                self.worst_n = bad_sorted[:n] # keep the new worst individuals\n    \n    def set_oldest(self): \n        \"\"\"Sets oldest individual in population\"\"\"\n        self.oldest = max(self.individuals, key=lambda ind: ind.age)\n    \n    def set_worst_n(self, attribute:str=\"fitness\", n:int=2): \n        \"\"\"Sets worst n elements based on the value of arbitrary attribute\"\"\"\n        self.worst_n = sorted(self.individuals, key=lambda ind: getattr(ind, attribute))[:n]\n    \n\ndef generate_population(searchspace_interface:NATSInterface, n_individuals:int=20)->list: \n    \"\"\"Generate a population of individuals\"\"\"\n    # at first generate full architectures, cell-structure and unique network indices\n    architectures, cells, indices = searchspace_interface.generate_random_samples(n_samples=n_individuals)\n    \n    # mapping strings to list of genes (~genomes)\n    genotypes = map(lambda cell: architecture_to_genotype(cell), cells)\n    # turn full architecture and cell-structure into genetic population individual\n    population = [\n        Individual(net=net, genotype=genotype, index=index) \n        for net, genotype, index in zip(architectures, genotypes, indices)\n    ]\n    return population\n","repo_name":"fracapuano/FreeREA","sub_path":"commons/genetics.py","file_name":"genetics.py","file_ext":"py","file_size_in_byte":14028,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"} +{"seq_id":"26230363306","text":"#import modules\nimport pandas as pd\nimport datetime\nfrom datetime import timedelta\nimport requests\nimport json\nfrom requests.auth import HTTPBasicAuth\nimport tkinter as tk\nfrom tkinter import ttk\n\nmy_font = (\"Helvetica\", 12)\n\ndef popupmsg(msg):\n\tpopup = tk.Tk()\n\tpopup.wm_title(\"Got it turtle!\")\n\tlabel = ttk.Label(popup, text=msg, font=my_font)\n\tlabel.pack(side=\"top\", fill=\"x\", padx=10, pady=20)\n\t#B1 = ttk.Button(popup, text=\"Okay\", command = popup.destroy)\n\t#B1 = pack()\n\tpopup.mainloop()\n\n#display options\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 20)\npd.set_option('display.width', 8000)\n\n#Today's day value in order to compare with worksheet\nnow = pd.Timestamp(datetime.date.today())\n\n#set file path\nfile_path = 'G:\\\My Drive\\\Formulario Abastecimiento\\\Abastecimiento.xlsm'\nfile_path2 = 'G:\\\My Drive\\\Planos\\\Criterios.xlsx' \n\n\ndef readFiles(f1): #Read the workbook, that is if you can 
\n\ttry:\n\t\t#load Abastecimiento.xlsm\n\t\tf = pd.read_excel(f1, sheet_name = 'BD_Apr', dtype=str) #importing workbook\n\texcept Exception:\n\t\treturn print('Could not read the workbook - check the file paths')\n\treturn f\n\ndf = readFiles(file_path)\ndf2 = pd.read_excel(file_path2, sheet_name = 'Bodega', dtype=str) #Calling criterios to perform combine action\ndf3 = pd.read_excel(file_path2, sheet_name = 'Bodega', dtype=str) #Calling criterios to perform combine action\n#df = df.parse() #Selecting sheet\n#############################################################################################\ndf = df.drop(['User_Apr', 'D_Item_Apr', 'Factor_Um_Item_Apr', 'Peso_Um_Item_Apr', 'Usr_LE', 'Fecha_VbOk', 'If_ok', 'TB_LE', 'Orig_Bodega_Apr', 'Dest_Bodega_Apr'], axis=1) #Dropping columns\ndf = df.drop(df.columns[df.columns.str.contains('Unnamed',case = False)],axis = 1) #drop columns with no title\ndf['Date_Apr'] = df['Date_Apr'].astype('datetime64[ns]') \n\ndf = df.loc[df['Date_Apr'] == now] #selecting today's registers, if any\ndf = df.loc[df['Estado'] == 'OK'] \ndf = df.drop(['Estado'], axis=1)\ndf = df.set_index('Id_Apr') #Assigning new column to index\ndf = df.reset_index() #self-explanatory\ndf['Qty_kg_Apr'] = round(pd.to_numeric(df['Qty_kg_Apr']),0)\ndf['Qty_Um_Apr'] = round(pd.to_numeric(df['Qty_Um_Apr']),0)\ndf = df.loc[df['Qty_Um_Apr'] > 0]\ndf = df.set_index('Id_Apr') #Assigning new column to index\ndf = df.reset_index() #self-explanatory\n#df = df.loc[:0,:]\ndf = df.loc[df['Cod_Bodega_O_Apr'] != df['Cod_Bodega_D_Apr']]\ndf = df.set_index('Id_Apr') #Assigning new column to index\ndf = df.reset_index() #self-explanatory\ndf.rename(columns={'Cod_Bodega_O_Apr': 'id_Bodega', 'Cod_Bodega_D_Apr': 'id_Bodega2'}, inplace=True) #Renaming columns\ndf3.rename(columns={'id_Bodega':'id_Bodega2'}, inplace=True) #preparation to combine id_Bodega\ndf2 = df2.drop(['descBodega', 'estado'], axis=1) #Vlookup(combination) for id_COpera\ndf3 = df3.drop(['descBodega', 'estado'], axis=1)\ndf = df.merge(df2, on='id_Bodega')\ndf = df.merge(df3, on='id_Bodega2')\ndf = df.loc[df['id_Bodega2'] != '00092']\ndf = df.loc[df['id_Bodega2'] != '00095']\ndf = df.loc[df['id_Bodega2'] != '00-91']\ndf = df.loc[df['id_Bodega'] != '00-91'] #sometimes\ndf['ctrl1'] = df['id_Bodega'] + df['id_Bodega2'] #Control column\ndf['Date_ent'] = df['Date_Apr'] + timedelta(days=6)\n\ndf = df.sort_values('ctrl1')\n\nl = list(df['ctrl1']) # extracting a list to pass to find_unique function\n\n\ndef find_unique(lists): # function to find unique values \n\tunique = []\n\ti=0\n\tfor regis in lists:\n\t\tif regis != lists[abs(i-1)]:\n\t\t\tunique.append('1')\n\t\telse:\n\t\t\tunique.append('0')\n\t\ti=i+1\n\tunique[0] = '1'\n\treturn unique\n\nctrl2 = find_unique(l) #creating flag list marking the start of each ctrl1 group \ndf.insert(loc=11, column='ctrl2', value=ctrl2)\n\nl2 = list(df['ctrl2']) # extracting a list to pass to find_consec function\n\n\ndef find_consec(lists2): #function to find consecutive values\n\tconsec = []\n\t#consec[0] = '1'\n\ti=0\n\tfor regis in lists2:\n\t\tif regis == '0':\n\t\t\tconsec.append(str(i))\n\t\telse:\n\t\t\tconsec.append('1')\n\t\t\ti=1\n\t\ti=i+1\n\treturn consec\n\nctrl3 = find_consec(l2)\n\ndf.insert(loc=12, column='ctrl3', value=ctrl3)\n\nl3 = list(df['ctrl2']) # extracting a list to pass to find_doc function\n\n\ndef find_doc(lists3): #function to find doc values\n\tdoc = []\n\t#consec[0] = '1'\n\ti=0\n\tfor regis in lists3:\n\t\tif regis == '1':\n\t\t\tdoc.append(str(i+1))\n\t\t\ti=i+1\n\t\telse:\n\t\t\tdoc.append(str(i))\n\treturn doc\n\nctrl4 = 
find_doc(l3)\n\ndf.insert(loc=11, column='ctrl4', value=ctrl4)\ndf['Date_Apr'] = df['Date_Apr'].astype(str)\ndf['Date_ent'] = df['Date_ent'].astype(str)\n\nl4 = list(df['Date_Apr']) #Extracting a list to remove the hyphens\nl5 = list(df['Date_ent'])\n\n\ndef kill_hyphens(lists4):\n\tno_hyphens = []\n\tfor d in lists4:\n\t\tno_hyphens.append(d.replace('-',''))\n\treturn no_hyphens\n\ndf['Date_ent'] = kill_hyphens(l5)\ndf['Date_Apr'] = kill_hyphens(l4)\n\ndocs = df.loc[df['ctrl2'] == '1']\ndocs = docs.drop(['ctrl2', 'id_COpera_x', 'Um_Item_Apr', 'Qty_kg_Apr', 'Item_Apr', 'ctrl3'], axis=1)\ndocs = docs[['id_COpera_y', 'ctrl4', 'Date_Apr', 'Date_ent', 'id_Bodega', 'id_Bodega2']]\ndocs = docs.set_index('id_COpera_y')\ndocs = docs.reset_index()\n\ndf = df.drop(['Id_Apr', 'ctrl1', 'Date_Apr', 'ctrl2', 'id_Bodega2', 'Qty_kg_Apr'], axis=1)\ndf = df[['id_COpera_y', 'ctrl4', 'ctrl3', 'Item_Apr', 'id_Bodega', 'Um_Item_Apr', 'Qty_Um_Apr', 'Date_ent', 'id_COpera_x']]\ndf = df.set_index('id_COpera_y')\ndf = df.reset_index()\n\nnee = {\"id_COpera_y\": \"copera\", \"ctrl4\":\"numDoc\", \"Date_Apr\":\"doc_FechaDocumento\", \"Date_ent\":\"doc_FechaEntrega\", \"id_Bodega\":\"doc_BodegaSalida\", \"id_Bodega2\":\"doc_BodegaEntrada\"}\nnee2 = {\"id_COpera_y\": \"copera\", \"ctrl4\":\"numDoc\", \"ctrl3\":\"mov_Registro\", \"Item_Apr\":\"mov_Item\", \"id_Bodega\":\"mov_Bodega\", \"Um_Item_Apr\":\"mov_UMedida\", \"Qty_Um_Apr\":\"mov_Cantidad\", \"Date_ent\":\"mov_FechaEntrega\", \"id_COpera_x\":\"mov_Copera\"}\n\ndocs.rename(columns=nee, inplace=True) #Renaming columns\ndf.rename(columns=nee2, inplace=True) #Renaming columns\n\ndf = df.set_index('copera')\ndf = df.reset_index()\n\ndocs = docs.set_index('copera')\ndocs = docs.reset_index()\n\n'''\ndate_to_name = str(now)\ndate_to_name = date_to_name[0:10]\nfile_name = 'REQUISICIONES_PLANEACION_' + date_to_name + '.xlsx'\n\nwrite_dfs = pd.ExcelWriter(file_name, engine = 'xlsxwriter')\ndocs.to_excel(write_dfs, sheet_name = 'Documentos')\ndf.to_excel(write_dfs, sheet_name = 'Movimientos')\nwrite_dfs.save()\nwrite_dfs.close() #This is in case I need it as an emergency solution!'''\n\nemptyDIC1 = {}\nemptyDIC2 = {}\nfullDIC = {}\nlistOUTTER = []\nlistINNER = []\n\nlistOUTTER.append(emptyDIC1)\nlistOUTTER.append(fullDIC)\nlistOUTTER.append(emptyDIC2)\t\n\ndata = ''\ns=0\nj=0\nrows_docs = len(docs.index)\nrows_df = len(df.index)\n\nfor k in range(rows_docs):\n\tid_doc = int(docs.iat[s,1])\n\tfullDIC.update( {\"copera\": docs.iat[s,0], \"numDoc\": docs.iat[s,1], \"doc_FechaDocumento\": docs.iat[s,2], \"doc_FechaEntrega\": docs.iat[s,3], \"doc_BodegaSalida\": docs.iat[s,4], \"doc_BodegaEntrada\": docs.iat[s,5]} )\n\tj=0\n\tfor i in range(rows_df): #match rows via the identifier that correlates rows_df with rows_docs, which is numDoc (ctrl4)\n\t\tid_mov = int(df.iat[j,1])\n\t\tif id_doc == id_mov:\n\t\t\tlistINNER.append({\"copera\": (df.iat[j,0]), \"numDoc\": df.iat[j,1], \"mov_Registro\": (df.iat[j,2]), \"mov_Item\": (df.iat[j,3]), \"mov_Bodega\": (df.iat[j,4]), \"mov_UMedida\": (df.iat[j,5]), \"mov_Cantidad\": (df.iat[j,6]), \"mov_FechaEntrega\": (df.iat[j,7])})\n\t\tj=j+1\n\t# Adding a new key-value pair with the n movement values\n\tfullDIC.update( {'mov_' : listINNER} )\n\tdata = json.dumps(str(listOUTTER))\n\tdata = data.replace('\\'', '\\\"')\n\tdata = data[1:-1]\n\t#print(data)\n\tlistINNER = []\n\ts=s+1\n\t\n\tMy_url = 'http://olympus.web.lan/Olympus/api/siesa/importar/rstOly_sob_aba_sie_pedInterno'\n\tMy_auth = HTTPBasicAuth('dpto_user', 'pass2019')\n\n\tr = requests.post( url = 
My_url, data = data, auth = My_auth)\n\t#print(r.text)\n\n#print('Success!')\npopupmsg(\"Hurry up and beat me dowm, i don't like what im supposed to handle.\")\n","repo_name":"Marlond25/inventory-transfer-procedure","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":7782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"20839409557","text":"import os as _os\nimport ctypes\nimport numpy as _np\n\nfrom . import _internal\nfrom .. import name as _name, attribute\nfrom ._internal import SymbolBase, _symbol_creator\nfrom ..base import mx_uint, check_call, _LIB, py_str\nfrom ..symbol_doc import _build_doc\nfrom ..base import _Null, _init_op_module, _is_np_op, _output_is_list\nfrom ..name import NameManager\nfrom ..profiler import _current_scope as _profiler_scope\nfrom ..ndarray import get_dtype_name\n# pylint: enable=unused-import\n\n\ndef _verify_np_symbol(op_name, func_name, sym):\n \"\"\"Verify if the sym is a numpy symbol.\n\n Parameters\n ----------\n op_name : str\n Operator full name registered in backend.\n func_name : str\n Operator name exposed to users. This is usually the name by stripping off\n the prefix of the full operator names registered in backend.\n sym : symbol to be verified\n \"\"\"\n from .numpy._symbol import _Symbol as np_symbol\n if not isinstance(sym, np_symbol):\n raise TypeError('Operator `{}` registered in backend is known as `{}` in Python. '\n 'This is a numpy operator which can only accept '\n 'MXNet numpy ndarrays, while received a legacy ndarray. '\n 'Please ensure that you have activated numpy semantics by calling '\n '`npx.set_np()` in your code. If you still see this error with numpy '\n 'semantics activated, please call `as_np_ndarray()` upon the legacy '\n 'ndarray to convert it to an MXNet numpy ndarray, and then feed the '\n 'converted array to this operator.'\n .format(op_name, func_name))\n\n\ndef _verify_legacy_symbol(op_name, func_name, sym):\n \"\"\"Verify if the sym is a legacy symbol.\n\n Parameters\n ----------\n op_name : str\n Operator full name registered in backend.\n func_name : str\n Operator name exposed to users. This is usually the name by stripping off\n the prefix of the full operator names registered in backend.\n sym : symbol to be verified\n \"\"\"\n from .numpy._symbol import _Symbol as np_symbol\n if isinstance(sym, np_symbol):\n raise TypeError('Operator `{}` registered in backend is known as `{}` in Python. '\n 'This is a legacy operator which can only accept '\n 'legacy ndarrays, while received an MXNet numpy ndarray. 
'\n 'Please call `as_nd_ndarray()` upon the numpy ndarray to '\n 'convert it to a legacy ndarray, and then feed the converted '\n 'array to this operator.'\n .format(op_name, func_name))\n\n\ndef _generate_symbol_function_code(handle, op_name, func_name, signature_only=False):\n \"\"\"Generate function for symbol op by handle and function name.\"\"\"\n real_name = ctypes.c_char_p()\n desc = ctypes.c_char_p()\n num_args = mx_uint()\n arg_names = ctypes.POINTER(ctypes.c_char_p)()\n arg_types = ctypes.POINTER(ctypes.c_char_p)()\n arg_descs = ctypes.POINTER(ctypes.c_char_p)()\n key_var_num_args = ctypes.c_char_p()\n ret_type = ctypes.c_char_p()\n\n check_call(_LIB.MXSymbolGetAtomicSymbolInfo(\n handle, ctypes.byref(real_name), ctypes.byref(desc),\n ctypes.byref(num_args),\n ctypes.byref(arg_names),\n ctypes.byref(arg_types),\n ctypes.byref(arg_descs),\n ctypes.byref(key_var_num_args),\n ctypes.byref(ret_type)))\n narg = int(num_args.value)\n arg_names = [py_str(arg_names[i]) for i in range(narg)]\n arg_types = [py_str(arg_types[i]) for i in range(narg)]\n key_var_num_args = py_str(key_var_num_args.value)\n ret_type = py_str(ret_type.value) if ret_type.value is not None else ''\n doc_str = _build_doc(op_name,\n py_str(desc.value),\n arg_names,\n arg_types,\n [py_str(arg_descs[i]) for i in range(narg)],\n key_var_num_args,\n ret_type)\n\n dtype_name = None\n arr_name = None\n ndsignature = []\n signature = []\n ndarg_names = []\n kwarg_names = []\n for i in range(narg):\n name, atype = arg_names[i], arg_types[i]\n if name == 'dtype':\n dtype_name = name\n signature.append(f'{name}=_Null')\n elif atype.startswith('NDArray') or atype.startswith('Symbol'):\n assert not arr_name, \\\n \"Op can only have one argument with variable \" \\\n \"size and it must be the last argument.\"\n if atype.endswith('[]'):\n ndsignature.append(f'*{name}')\n arr_name = name\n else:\n ndsignature.append(f'{name}=None')\n ndarg_names.append(name)\n else:\n signature.append(f'{name}=_Null')\n kwarg_names.append(name)\n #signature.append('is_train=False')\n signature.append('name=None')\n signature.append('attr=None')\n signature.append('out=None')\n signature.append('**kwargs')\n signature = ndsignature + signature\n\n is_np_op = _is_np_op(op_name)\n output_is_list = _output_is_list(op_name)\n verify_symbol_fn = _verify_np_symbol.__name__ if is_np_op else _verify_legacy_symbol.__name__\n code = []\n if arr_name:\n code.append(\"\"\"\ndef %s(*%s, **kwargs):\"\"\"%(func_name, arr_name))\n if not signature_only:\n code.append(\"\"\"\n sym_args = []\n for i in {}:\n assert isinstance(i, SymbolBase), \\\\\n \"Positional arguments must be Symbol instances, \" \\\\\n \"but got %s\"%str(i)\n {}('{}', '{}', i)\n sym_args.append(i)\"\"\".format(arr_name, verify_symbol_fn, op_name, func_name))\n if dtype_name is not None:\n code.append(\"\"\"\n if '%s' in kwargs:\n kwargs['%s'] = get_dtype_name(kwargs['%s'])\"\"\"%(dtype_name, dtype_name, dtype_name))\n code.append(\"\"\"\n attr = kwargs.pop('attr', None)\n kwargs.update(attribute.current().get(attr))\n name = kwargs.pop('name', None)\n name = _name.current().get(name, '%s')\n _ = kwargs.pop('out', None)\n keys = []\n vals = []\n sym_kwargs = dict()\n for k, v in kwargs.items():\n if isinstance(v, SymbolBase):\n sym_kwargs[k] = v\n %s('%s', '%s', v)\n else:\n keys.append(k)\n vals.append(v)\"\"\"%(func_name.lower(), verify_symbol_fn, op_name, func_name))\n if key_var_num_args: # pylint: disable=using-constant-test\n code.append(\"\"\"\n if '%s' not in kwargs:\n keys.append('%s')\n 
vals.append(len(sym_args) + len(sym_kwargs))\"\"\"%(\n key_var_num_args, key_var_num_args))\n\n code.append(\"\"\"\n if 'profiler_scope' not in keys:\n keys.append('profiler_scope')\n vals.append(_profiler_scope.get())\n return _symbol_creator(%d, sym_args, sym_kwargs, keys, vals, name, %s, %s)\"\"\"%(\n handle.value, str(is_np_op), str(output_is_list)))\n else:\n code.append(\"\"\"\ndef %s(%s):\"\"\"%(func_name, ', '.join(signature)))\n if not signature_only:\n code.append(\"\"\"\n kwargs.update(attribute.current().get(attr))\n sym_kwargs = dict()\n _keys = []\n _vals = []\n for _k, _v in kwargs.items():\n if isinstance(_v, SymbolBase):\n sym_kwargs[_k] = _v\n {}('{}', '{}', _v)\n else:\n _keys.append(_k)\n _vals.append(_v)\"\"\".format(verify_symbol_fn, op_name, func_name))\n # NDArray args\n for name in ndarg_names: # pylint: disable=redefined-argument-from-local\n code.append(\"\"\"\n if {name} is not None:\n assert isinstance({name}, SymbolBase), \\\\\n \"Argument {name} must be Symbol instances, but got %s\"%str({name})\n sym_kwargs['{name}'] = {name}\"\"\".format(name=name))\n code.append(\"\"\"\n {}('{}', '{}', {name})\n \"\"\".format(verify_symbol_fn, op_name, func_name, name=name))\n # kwargs\n for name in kwarg_names: # pylint: disable=redefined-argument-from-local\n code.append(\"\"\"\n if %s is not _Null:\n _keys.append('%s')\n _vals.append(%s)\"\"\"%(name, name, name))\n # dtype\n if dtype_name is not None:\n if is_np_op:\n code.append(\"\"\"\n if %s is not _Null and %s is not None:\n _keys.append('%s')\n _vals.append(get_dtype_name(%s))\"\"\"%(dtype_name, dtype_name, dtype_name, dtype_name))\n else:\n code.append(\"\"\"\n if %s is not _Null:\n _keys.append('%s')\n _vals.append(get_dtype_name(%s))\"\"\"%(dtype_name, dtype_name, dtype_name))\n\n code.append(\"\"\"\n name = _name.current().get(name, '%s')\n if 'profiler_scope' not in _keys:\n _keys.append('profiler_scope')\n _vals.append(_profiler_scope.get())\n return _symbol_creator(%d, None, sym_kwargs, _keys, _vals, name, %s, %s)\"\"\"%(\n func_name.lower(), handle.value, str(is_np_op), str(output_is_list)))\n\n if signature_only:\n code.append(\"\"\"\n return (0,)\"\"\")\n\n doc_str_lines = _os.linesep+''.join([' '+s if s.strip() else s\n for s in 'r\"\"\"{doc_str}\"\"\"'.format(doc_str=doc_str)\n .splitlines(True)])\n code.insert(1, doc_str_lines)\n return ''.join(code), doc_str\n\n\ndef _make_symbol_function(handle, name, func_name):\n \"\"\"Create a symbol function by handle and function name.\"\"\"\n code, doc_str = _generate_symbol_function_code(handle, name, func_name)\n\n local = {}\n exec(code, None, local) # pylint: disable=exec-used\n symbol_function = local[func_name]\n symbol_function.__name__ = func_name\n symbol_function.__doc__ = doc_str\n symbol_function.__module__ = 'mxnet.symbol'\n return symbol_function\n\n_init_op_module('mxnet', 'symbol', _make_symbol_function)\n\n# Update operator documentation with added float support\n# Note that we can only do this after the op module is initialized\n# Otherwise the backend operators cannot be found\n# pylint: disable=wrong-import-position\nfrom .contrib import adamw_update, mp_adamw_update\nfrom ._internal import _adamw_update, _mp_adamw_update\nadamw_update.__doc__ = _adamw_update.__doc__.replace(\"rescale_grad : Symbol\",\n \"rescale_grad : Symbol or float\")\nmp_adamw_update.__doc__ = _mp_adamw_update.__doc__.replace(\"rescale_grad : Symbol\",\n \"rescale_grad : Symbol or 
float\")\n","repo_name":"apache/mxnet","sub_path":"python/mxnet/symbol/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":10696,"program_lang":"python","lang":"en","doc_type":"code","stars":20642,"dataset":"github-code","pt":"46"} +{"seq_id":"24366865889","text":"import requests\nfrom bs4 import BeautifulSoup\n\nresponse = requests.get(\"https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=%EC%82%BC%EC%84%B1%EC%A0%84%EC%9E%90\")\n# get 요청을 한 html을 얻기 위해서는 response.text\nhtml = response.text\nsoup = BeautifulSoup(html,'html.parser')\n# select_one 이 아닌 select를 사용하게 되면 list에 담겨서 정보가 넘어온다.\nlinks = soup.select(\".news_tit\")\n# print(links)\n# 리스트에서 하나씩 꺼내어서 title을 뽑아낸다.\nfor link in links :\n title = link.text\n # href의 속성값을 가져온다.\n url = link.attrs['href']\n print(title,url)\n # print(title)","repo_name":"ehddud1006/dongpu2","sub_path":"python_Crolling/Chapter02/뉴스제목과링크가져오기.py","file_name":"뉴스제목과링크가져오기.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"43283427318","text":"import self_py_fun.GlobalEEG as gc\nfrom self_py_fun.ExistMLFun import *\nsns.set_context('notebook')\nreshape_to_1d = False\nshow_dim_bool = True\n\nbp_low = 0.5\n# bp_upp = 6\nbp_upp = float(sys.argv[4])\nif float(bp_upp) == int(bp_upp):\n bp_upp = int(bp_upp)\nelse:\n bp_upp = float(bp_upp)\n\nif bp_upp < 0:\n eeg_file_suffix = 'raw'\nelse:\n eeg_file_suffix = 'raw_bp_{}_{}'.format(bp_low, bp_upp)\neeg_file_suffix_2 = '{}_{}_from_{}'.format(\n gc.file_subscript, gc.DEC_FACTOR, eeg_file_suffix\n)\n\nEEGswLDAObj = ExistMLPred(\n data_type=gc.DATA_TYPE,\n sub_folder_name=gc.sub_file_name,\n sub_name_short=gc.sub_file_name[:4],\n # EEGGeneralFun class\n # sampling_rate=gc.sampling_rate,\n num_repetition=gc.NUM_REPETITION,\n num_electrode=gc.NUM_ELECTRODE,\n flash_and_pause_length=gc.FLASH_AND_PAUSE_LENGTH,\n num_letter=gc.LETTER_DIM,\n n_multiple=gc.N_MULTIPLE,\n local_bool=gc.local_use\n)\n\n\n# Import the training set without subsetting yet\n[eeg_signals, eeg_code, eeg_type] = EEGswLDAObj.import_eeg_processed_dat(eeg_file_suffix_2, reshape_to_1d)\neeg_signals = np.transpose(eeg_signals, [1, 0, 2, 3])\n\n# Produce truncated eeg signals subset\neeg_signals_trun, eeg_type_sub = EEGswLDAObj.create_truncate_segment_batch(\n eeg_signals, eeg_type, gc.LETTER_DIM,\n gc.NUM_REPETITION, show_dim_bool\n)\n\n[eeg_signals_trun_t_mean,\n eeg_signals_trun_nt_mean,\n eeg_signals_trun_t_cov,\n eeg_signals_trun_nt_cov] = EEGswLDAObj.produce_trun_mean_cov_subset(\n eeg_signals_trun, eeg_type_sub\n)\n\nEEGswLDAObj.produce_mean_covariance_plots(\n eeg_signals_trun_t_mean, eeg_signals_trun_nt_mean,\n None, None, eeg_file_suffix_2, sim_dat=False\n)\n\n# For odd/even inference purpose\n[eeg_signals_trun_odd, eeg_type_odd, eeg_code_odd,\n eeg_signals_trun_even, eeg_type_even, eeg_code_even] = EEGswLDAObj.split_trunc_train_set_odd_even(\n eeg_signals_trun, eeg_type, eeg_code, gc.rep_odd_id, gc.rep_even_id\n)\n\n# Save the entire training sequence and\n# extended eeg_type/label for matlab usage.\n\neeg_signals_trun = np.transpose(eeg_signals_trun, [1, 0, 2])\nEEGswLDAObj.save_truncate_signal_1d_real(\n eeg_signals_trun, eeg_type, eeg_code,\n eeg_file_suffix_2,\n gc.NUM_REPETITION, array_3d_bool=False\n)\n# Save odd/even sequence for matlab use.\nEEGswLDAObj.save_truncate_signal_1d_real(\n eeg_signals_trun_odd, eeg_type_odd, eeg_code_odd,\n eeg_file_suffix_2 + '_odd',\n len(gc.rep_odd_id), 
array_3d_bool=True\n)\nEEGswLDAObj.save_truncate_signal_1d_real(\n eeg_signals_trun_even, eeg_type_even, eeg_code_even,\n eeg_file_suffix_2 + '_even',\n len(gc.rep_even_id), array_3d_bool=True\n)\n\n\n# Produce the truncated mean curves for training set here\neeg_type_odd_1d = np.reshape(\n eeg_type_odd, [gc.LETTER_DIM * len(gc.rep_odd_id) * gc.NUM_REP]\n)\neeg_signals_trun_odd = np.transpose(eeg_signals_trun_odd, [1, 0, 2])\n[eeg_signals_odd_trun_t_mean, eeg_signals_odd_trun_nt_mean,\n eeg_signals_odd_trun_t_cov, eeg_signals_odd_trun_nt_cov] = EEGswLDAObj.produce_trun_mean_cov_subset(\n eeg_signals_trun_odd, eeg_type_odd_1d\n)\n\n# print(eeg_signals_odd_trun_t_cov[:, :5, :5])\n\nEEGswLDAObj.produce_mean_covariance_plots(\n eeg_signals_odd_trun_t_mean, eeg_signals_odd_trun_nt_mean,\n None, None, eeg_file_suffix_2 + '_odd', sim_dat=False\n)\n\n# Produce the truncated mean curves for testing set here\neeg_type_even_1d = np.reshape(\n eeg_type_even, [gc.LETTER_DIM * len(gc.rep_even_id) * gc.NUM_REP]\n)\neeg_signals_trun_even = np.transpose(eeg_signals_trun_even, [1, 0, 2])\n[eeg_signals_even_trun_t_mean,\n eeg_signals_even_trun_nt_mean,\n eeg_signals_even_trun_t_cov,\n eeg_signals_even_trun_nt_cov] = EEGswLDAObj.produce_trun_mean_cov_subset(\n eeg_signals_trun_even, eeg_type_even_1d\n)\n\nEEGswLDAObj.produce_mean_covariance_plots(\n eeg_signals_even_trun_t_mean, eeg_signals_even_trun_nt_mean,\n None, None, eeg_file_suffix_2 + '_even', sim_dat=False\n)","repo_name":"NiubilityDiu/BayesInferenceEEGBCI","sub_path":"Python/real_data/EEG_existML_pre.py","file_name":"EEG_existML_pre.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"5658873380","text":"from abc import ABCMeta, abstractmethod\n\nfrom py_pdf_term.tokenizers import Term\n\nfrom ..classifiers import (\n BaseTokenClassifier,\n EnglishTokenClassifier,\n JapaneseTokenClassifier,\n)\n\n\nclass BaseSplitter(metaclass=ABCMeta):\n \"\"\"Base class for splitters of a wrongly concatenated term.\n\n Since text extraction from PDF is not perfect especially in a table or a figure,\n a term may be wrongly concatenated. For example, when a PDF file contains a table\n which shows the difference between quick sort, merge sort, and heap sort, the\n extracted text may be something like \"quick sort merge sort heap sort\". In this\n case, \"quick sort\", \"merge sort\", and \"heap sort\" are wrongly concatenated.\n\n This class is used to split a wrongly concatenated term into subterms.\n\n Args\n ----\n classifiers:\n List of token classifiers to classify tokens into specific categories.\n If None, the default classifiers are used. 
The default classifiers are\n JapaneseTokenClassifier and EnglishTokenClassifier.\n \"\"\"\n\n def __init__(self, classifiers: list[BaseTokenClassifier] | None = None) -> None:\n if classifiers is None:\n classifiers = [\n JapaneseTokenClassifier(),\n EnglishTokenClassifier(),\n ]\n\n self._classifiers = classifiers\n\n @abstractmethod\n def split(self, term: Term) -> list[Term]:\n \"\"\"Split a wrongly concatenated term.\n\n Args\n ----\n term:\n Wrongly concatenated term to be split.\n\n Returns\n -------\n list[Term]:\n List of split terms.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__}.split()\")\n","repo_name":"kumachan-mis/py-pdf-term","sub_path":"py_pdf_term/candidates/_candidates/splitters/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"37317464553","text":"#!/usr/bin/env python\n# coding=utf-8\nimport numpy as np\nimport matplotlib.pyplot as plt\n#============== read meta =================================\nPATHdataA='/home/ycli/data/gbt/gbt_15hr_41-80_pointcorr/secA_15hr_41-80_pointcorr_clean_map_I_800.npy'\nPATHdataB='/home/ycli/data/gbt/gbt_15hr_41-80_pointcorr/secB_15hr_41-80_pointcorr_clean_map_I_800.npy'\nfrom core import algebra\ndef ReadMeta(data_path):\n '''return freq ra dec'''\n data = algebra.make_vect(algebra.load(data_path))\n freq = data.get_axis('freq')\n ra = data.get_axis('ra')\n dec = data.get_axis('dec')\n return freq,ra,dec\nFreq,Ra,Dec=ReadMeta(PATHdataA)\ndef plot(data,freq,sigma=False):\n if sigma:\n mean=data[freq].mean()\n std=data[freq].std()\n min=mean-std\n max=mean+std\n else:\n min=None\n max=None\n plt.pcolormesh(Dec,Ra,data[freq],vmin=min,vmax=max)\n plt.colorbar()\n plt.xlim([Dec.min(),Dec.max()])\n plt.ylim([Ra.min(),Ra.max()])\n plt.xlabel('dec')\n plt.ylabel('ra')\n plt.gca().invert_yaxis()\n\n\nN=6\n#cut=0\ndataA=np.load(PATHdataA)#[:,cut:-cut,cut:-cut]\nRa=Ra#[cut:-cut]\nDec=Dec#[cut:-cut]\ndataB=np.load(PATHdataB)\nshape=dataA.shape\nmapA=dataA.reshape(shape[0],-1)\nmapB=dataB.reshape(shape[0],-1)\nC=np.dot(mapA,mapA.T)\nU,s,V=np.linalg.svd(C)\n\nS=np.zeros_like(s)\nS[:N]=1.\n#map_clean=np.dot((1-np.dot(np.dot(U,np.diag(S)),V)),mapA)\nFg=np.dot(np.dot(np.dot(U,np.diag(S)),V),mapA)\nmap_clean=mapA-Fg\nmap_clean=map_clean.reshape(shape)\nplt.figure('pca',figsize=(24,18))\nplt.subplot(212)\nplt.title('remove %d mode'%N)\nplt.semilogy(s**0.5,'g.-')\nplt.semilogy(s[:N]**0.5,'r.')\nplt.ylim(0.1,(s**0.5).max())\nplt.subplot(231)\nplt.title('origin')\nplot(dataA,100)\nplt.subplot(232)\nplt.title('foreground')\nplot(Fg.reshape(shape),100)\nplt.subplot(233)\nplt.title('cleaned')\nplot(map_clean,100,sigma=True)\n\n#print map_clean.shape\n#map_clean=map_clean.reshape(shape)\n#plt.savefig('/home/mtx/ICA_learning/ICA4GBT/pca/data_pca/PCA_remove_%dmode.png'%N)\nplt.show()\n#np.save('/home/mtx/ICA_learning/ICA4GBT/pca/data_pca/PCA_remove_%dmode'%N,map_clean)\n","repo_name":"POFK/ICA_learning","sub_path":"ICA4GBT/backup_0701/pca/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28705284354","text":"def frame_at_coordinates_jit(coordinates, mask, mask_index_array, psf):\n \"\"\" Compute the frame (indexes of pixels light is blurred into) and psf_frame (psf kernel values of those \\\n pixels) for a given coordinate in a masks and its PSF.\n\n Parameters\n ----------\n coordinates: 
(int, int)\n The coordinates of mask_index_array on which the frame should be centred\n psf_shape: (int, int)\n The shape of the psf for which this frame will be used\n \"\"\"\n\n psf_shape = psf.shape\n psf_max_size = psf_shape[0] * psf_shape[1]\n\n half_x = int(psf_shape[0] / 2)\n half_y = int(psf_shape[1] / 2)\n\n frame = -1 * np.ones((psf_max_size))\n psf_frame = -1.0 * np.ones((psf_max_size))\n\n count = 0\n for i in range(psf_shape[0]):\n for j in range(psf_shape[1]):\n x = coordinates[0] - half_x + i\n y = coordinates[1] - half_y + j\n if 0 <= x < mask_index_array.shape[0] and 0 <= y < mask_index_array.shape[1]:\n value = mask_index_array[x, y]\n if value >= 0 and not mask[x, y]:\n frame[count] = value\n psf_frame[count] = psf[i, j]\n count += 1\n\n return frame, psf_frame","repo_name":"MichaelFu1998-create/security_scanning","sub_path":"codesearchnet/codesearchnet_10644.py","file_name":"codesearchnet_10644.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"19283511015","text":"import sys\n\nimport faulthandler\nimport time\n\nimport ctrlxdatalayer\nfrom ctrlxdatalayer.variant import Result\n\nfrom helper.ctrlx_datalayer_helper import get_client\nimport datalayerclient.app\n\n\nif __name__ == '__main__':\n\n print()\n print(\"=============================================================================\")\n print(\"Simple ctrlX Data Layer Client Snap in Python using Data Layer subscriptions.\")\n print(\"Will be restarted by the snap system.\")\n print(\"=============================================================================\")\n print()\n\n faulthandler.enable()\n\n with ctrlxdatalayer.system.System(\"\") as datalayer_system:\n datalayer_system.start(False)\n\n datalayer_client, datalayer_client_connection_string = get_client(datalayer_system)\n if datalayer_client is None:\n print(\"WARNING Connecting\", datalayer_client_connection_string, \"failed.\")\n sys.exit(1)\n\n with datalayer_client: # datalayer_client is closed automatically when leaving with block\n\n subscription_properties = ctrlxdatalayer.subscription.create_properties(\n \"python-datalayer-client-sub\", publish_interval=100)\n\n if subscription_properties is None:\n print(\"ERROR create_properties() returned: None\")\n sys.exit(1)\n\n with subscription_properties:\n result, subscription = datalayerclient.app.subscribe_single(\n datalayer_client, subscription_properties)\n if result != Result.OK:\n print(\"ERROR subscribe_single() failed with:\", result)\n sys.exit(1)\n\n if subscription is None:\n print(\"ERROR subscribe_single() returned None\")\n sys.exit(1)\n\n with subscription:\n time.sleep(10.0)\n subscription.unsubscribe_all()\n\n result, subscription = datalayerclient.app.subscribe_multi(\n datalayer_client, subscription_properties)\n if result != Result.OK:\n print(\"ERROR subscribe_multi() failed with:\", result)\n sys.exit(1)\n\n if subscription is None:\n print(\"ERROR subscribe_multi() returned None\")\n sys.exit(1)\n\n with subscription:\n\n # Endless loop\n while datalayer_client.is_connected():\n time.sleep(1.0)\n\n subscription.unsubscribe_all()\n\n # Attention: Doesn't return if any provider or client instance is still running\n stop_ok = datalayer_system.stop(False)\n print(\"System Stop\", stop_ok)\n\n 
sys.exit(0)\n","repo_name":"LvEkk/ctrlx-automation-sdk","sub_path":"samples-python/datalayer.client.sub/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"46"} +{"seq_id":"43629444636","text":"# -*- coding: utf-8 -*-\r\n\r\n'''\r\nCreated on 2020/12/10 14:26\r\n\r\n@project: OCR\r\n\r\n@filename: watermark.py\r\n\r\n@author: knavezl\r\n\r\n@Desc: \r\n \r\n'''\r\nimport os,math\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\n#拼接图片\r\ndef image_compose(width_num,height_num,mark_img_path,mark_img_width,mark_img_height):\r\n compose_img_path = 'images/compose_img.png'\r\n to_image = Image.new('RGBA', (width_num * mark_img_width, height_num * mark_img_height)) #创建一个新图\r\n # 循环遍历,把每张图片按顺序粘贴到对应位置上\r\n for y in range(1, height_num + 1):\r\n for x in range(1, width_num + 1):\r\n from_image = Image.open(mark_img_path).resize((mark_img_width,mark_img_height),Image.ANTIALIAS)\r\n to_image.paste(from_image, ((x - 1) * mark_img_width, (y - 1) * mark_img_height))\r\n # to_image.save(compose_img_path) # 保存新图\r\n return to_image\r\n\r\n\r\n#叠加图片\r\n# 该接口使用掩码(mask)的形式对两幅图像进行合并。\r\ndef blend_images(src_img,compose_img,res_img_path):\r\n img1 = src_img\r\n\r\n img2 = compose_img\r\n\r\n r, g, b, alpha = img2.split()\r\n alpha = alpha.point(lambda i: i > 0 and 204) # 204起到的效果和使用blend()接口时的0.3类似。\r\n\r\n img = Image.composite(img2, img1, alpha)\r\n\r\n img.show()\r\n img.save(res_img_path)\r\n\r\n#添加水印\r\ndef add_water_mark(text,src_img_path,mark_img_path,res_img_path):\r\n src_img=Image.open(src_img_path)\r\n src_img = src_img.convert('RGBA')\r\n src_img_width,src_img_height=src_img.size\r\n\r\n #print('src_img_width=',src_img_width)\r\n #print('src_img_height=',src_img_height)\r\n\r\n text_to_img(text, mark_img_path)\r\n mark_img = Image.open(mark_img_path)\r\n mark_img_width, mark_img_height = mark_img.size\r\n #print('mark_img_width=', mark_img_width)\r\n #print('mark_img_height=', mark_img_height)\r\n\r\n width_num=math.ceil(src_img_width / mark_img_width)\r\n height_num = math.ceil(src_img_height / mark_img_height)\r\n\r\n #拼接图片\r\n compose_img=image_compose(width_num, height_num, mark_img_path, mark_img_width, mark_img_height)\r\n #叠加图片\r\n blend_images(src_img,compose_img,res_img_path)\r\n\r\n# 文字转图片\r\ndef text_to_img(text,save_img_path):\r\n new_image = Image.new('RGBA', (256,256)) # 创建一个新图\r\n draw = ImageDraw.Draw(new_image)\r\n font = ImageFont.truetype('font/simsun.ttc', 20)\r\n draw.text((0, 100), text, (255,0,0), font=font)\r\n\r\n # 旋转图像\r\n new_image = new_image.rotate(20, expand=1)\r\n #new_image.show()\r\n new_image.save(os.path.expanduser(save_img_path))\r\n\r\nif __name__ == '__main__':\r\n src_img_path='images/sfz.png'\r\n mark_img_path='images/mark.png'\r\n res_img_path='images/sfz_mark.png'\r\n text=u\"——仅供办理居住证使用——\"\r\n text_to_img(text, mark_img_path)\r\n\r\n add_water_mark(text,src_img_path,mark_img_path,res_img_path)\r\n\r\n","repo_name":"knavezl/water_mark","sub_path":"watermark.py","file_name":"watermark.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"29222789241","text":"from nltk.tokenize import WhitespaceTokenizer\nfrom collections import Counter\nimport random\n\n\ndef is_end_of_sentence(x):\n return x.endswith('.') or x.endswith('?') or x.endswith('!')\n\n\ndef create_data():\n print(\"Input name of file with data\")\n name_of_file = input()\n file = 
open(name_of_file, 'r', encoding='utf-8')\n text = file.read()\n file.close()\n words = WhitespaceTokenizer().tokenize(text)\n bigrams = [words[i] + ' ' + words[i + 1] for i in range(len(words) - 1)]\n\n # key is word, value is dict where key is word and value is num of such bigrams\n dict_with_bigrams = {}\n list_of_bigrams = Counter(bigrams).most_common()\n\n for bigram in list_of_bigrams:\n current_ = bigram[0].split()\n dict_with_bigrams.setdefault(current_[0], {})\n dict_with_bigrams[current_[0]][current_[1]] = bigram[1]\n return words, dict_with_bigrams\n\n\nif __name__ == '__main__':\n words, dict_with_bigrams = create_data()\n print(\"Input number of sentences\")\n\n n = int(input())\n for i in range(n):\n current_word = None\n for j in range(80):\n # first word\n while not current_word:\n current_word = random.choice(words)\n if not current_word[0].isupper() or is_end_of_sentence(current_word):\n current_word = None\n else:\n if j == 0:\n print(current_word, end=\" \")\n tails = [i for i in dict_with_bigrams[current_word].keys()]\n nums = [i for i in dict_with_bigrams[current_word].values()]\n current_word = random.choices(tails, nums)[0]\n print(current_word, end=\" \")\n if j >= 4 and (is_end_of_sentence(current_word)):\n break\n print()\n","repo_name":"MilenaTss/Markov_Chain_Text_generator","sub_path":"bigrams.py","file_name":"bigrams.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"7048836728","text":"from selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport re\n\nclass Result:\n def __init__(self, url, title, content):\n self.url = url\n self.title = title\n self.content = content\n\n\ndef scrapePage(query):\n # Setting up driver\n options = webdriver.ChromeOptions()\n options.binary_location = \"/usr/bin/chromium\"\n options.add_argument(\"--headless\")\n driver = webdriver.Chrome(options=options)\n\n driver.get('https://pt.wikipedia.org/wiki/Wikipédia:Página_principal')\n\n try:\n searchBox = driver.find_element_by_xpath('/html/body/div[5]/div[1]/div[2]/div/form/div/input[1]')\n searchBox.send_keys(query)\n\n searchButton = driver.find_element_by_xpath('/html/body/div[5]/div[1]/div[2]/div/form/div/input[4]')\n searchButton.click()\n\n resultUrl = driver.current_url\n resultTitle = driver.find_element_by_xpath('/html/body/div[3]/h1').text\n resultContent = re.sub(r'\\[.*?\\]', \"\", str(driver.find_element_by_xpath('/html/body/div[3]/div[3]/div[5]/div[1]/p[1]').text))\n\n driver.close()\n\n return Result(resultUrl, resultTitle, resultContent)\n\n except (NoSuchElementException) as error:\n print(error)\n\n return -1\n\n driver.close()\n\n","repo_name":"luissimas/telegram-bot","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"12934911811","text":"\nclass Node:\n \"\"\"A node stores data and a pointer to the next node\"\"\"\n def __init__(self, data, pointer=None):\n self.data = data\n self.pointer = pointer\n\n\nclass LinkedList:\n \"\"\"A singly linked list\"\"\"\n def __init__(self, head=None):\n self.head = head\n\n def print_list(self):\n \"\"\"Traverses the list, printing the data of each node\"\"\"\n if self.head:\n n = self.head\n while n:\n if n.pointer:\n print(n.data, end=', ')\n else:\n print(n.data)\n n = n.pointer\n\n def get_length(self):\n \"\"\"Returns the number 
of nodes in the list\"\"\"\n length = 0\n if self.head:\n length += 1\n n = self.head\n while n.pointer:\n n = n.pointer\n length += 1\n return length\n\n\n def peek(self):\n \"\"\"Returns the data of the list's head\"\"\"\n return self.head.data if self.head else None\n\n def delete_list(self):\n \"\"\"Deletes all the contents of the list using garbage collection\"\"\"\n self.head = None\n\n def push(self, new_data):\n \"\"\"Pushes a node containing the new data to the front of the list\"\"\"\n node = Node(new_data)\n if not self.head:\n self.head = node\n else:\n node.pointer = self.head\n self.head = node\n\n def append(self, new_data):\n \"\"\"Appends a node containing the new data to the end of the list\"\"\"\n node = Node(new_data)\n if self.head:\n n = self.head\n while n:\n if n.pointer:\n n = n.pointer\n continue\n break\n n.pointer = node\n else:\n self.head = node\n\n def insert_node(self, new_data, pos):\n \"\"\"Inserts a node containing the new data at the given position (1-n)\"\"\"\n\n # Ensures valid argument\n if type(pos) != int:\n print(\"Position must be an integer value\")\n return False\n\n # Pushes data\n elif pos == 1:\n self.push(new_data)\n return True\n\n # No nodes to insert behind\n elif not self.head:\n print('No nodes in list')\n return False\n\n # Appends data\n elif self.get_length() + 1 == pos:\n self.append(new_data)\n return True\n\n # Inserts data in between two nodes\n elif 1 < pos <= self.get_length():\n n = self.head\n for i in range(1, pos):\n if i == pos - 1:\n prev = n\n n = n.pointer\n node = Node(new_data, n)\n prev.pointer = node\n return True\n\n # Intended position of node exceeds the length of the list + 1\n # Eg: Node at position 3 cannot point to node at position 5\n print('No node at previous position')\n return False\n\n def remove_node(self, pos):\n \"\"\"Disengages a node from the list and returns it's data\"\"\"\n\n # List is empty\n if not self.head:\n print('List is already empty')\n return False\n\n # Removing the head of the list\n elif pos == 1:\n prev = self.head\n self.head = self.head.pointer\n prev.pointer = None\n return prev.data\n\n # Removing the tail of the list\n elif self.get_length() == pos:\n n = self.head\n while n.pointer.pointer:\n n = n.pointer\n data = n.pointer.data\n n.pointer = None\n return data\n\n # Removing an element between two nodes\n elif 1 < pos < self.get_length():\n n = self.head\n for i in range(1, pos):\n if i == pos - 1:\n prev = n\n n = n.pointer\n prev.pointer = n.pointer\n n.pointer = None\n return n.data\n\n print('There is no node at this index')\n return False\n\n\n# Driver program\nif __name__ == '__main__':\n lst = LinkedList()\n\n print('----------------------')\n print('Before list generation:')\n print('Length of list:', lst.get_length())\n print('Peek:', lst.peek())\n print('----------------------')\n print('During list generation (errors):')\n\n lst.push('b') # b\n lst.remove_node(2) # b --> (no node at this position to remove)\n lst.append('d') # b, d\n lst.insert_node('c', 2) # b, c, d\n lst.insert_node('y', 5) # b, c, d --> (node at position 5 will not link with node at position 3)\n lst.push('a') # a, b, c, d\n lst.append('e') # a, b, c, d, e\n removed = lst.remove_node(4) # a, b, c, e\n lst.insert_node('z', 3) # a, b, z, c, e\n lst.remove_node(-9293) # a, b, z, c, e --> (no node at this position to remove)\n\n print('---------------------------')\n print('After list generation:')\n print('List:')\n lst.print_list()\n print('Length of list:', lst.get_length())\n print('Peek:', 
lst.peek())\n print('Data removed:', removed)\n print('---------------------------')\n lst.delete_list()\n print('List deleted.')\n print('Length of list:', lst.get_length())\n print('Peek:', lst.peek())","repo_name":"Scott-Wilson-00/Practice-Files","sub_path":"Linked-Lists/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"21695090552","text":"SITENAME = 'pelican_feed_generator test'\nSITEURL = 'http://localhost:8000'\n\nTIMEZONE = 'Europe/Rome'\nDEFAULT_LANG = 'en'\nDEFAULT_PAGINATION = False\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nRELATIVE_URLS = True\n\nPATH = \"content\"\nOUTPUT_PATH = \"build\"\n\nSTATIC_PATHS = [\n \"static\",\n]\n\nPLUGIN_PATHS = [\n \"../pelican/plugins\",\n]\n\nPLUGINS = [\n \"feed_xslt\",\n]\n\nFEED_RSS = \"rss.xml\"\nFEED_ALL_RSS = \"rss-all.xml\"\n\nXSLT_PATH_RSS = \"static/rss.xsl\"\n","repo_name":"atomicparade/pelican_feed_xslt","sub_path":"sample/pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"1059498080","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 30 23:19:37 2020\r\n\r\n@author: Eslam Youssef\r\n\"\"\"\r\n\r\nfrom flask import Flask, request, render_template\r\nimport pickle\r\nimport numpy as np\r\n\r\napp = Flask(__name__)\r\nmodel = pickle.load(open('classifier.pkl', 'rb'))\r\nscaler= pickle.load(open('scaler.pkl','rb'))\r\n\r\n#print('Model Attributes')\r\n#print(model.__dict__)\r\n#print('Scaler Attributes')\r\n#print(scaler.__dict__)\r\n#print('\\n\\n Age Mean=',scaler.mean_[0])\r\n#print('\\n\\n Age Std=',scaler.scale_[0])\r\n#print('\\n\\n Salary Mean=',scaler.mean_[1])\r\n#print('\\n\\n Salary Std=',scaler.scale_[1])\r\n\r\nage_mean=scaler.mean_[0]\r\nage_std=scaler.scale_[0]\r\nsalary_mean=scaler.mean_[1]\r\nsalary_std=scaler.scale_[1]\r\n\r\n@app.route('/')\r\ndef home():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict',methods=['POST'])\r\ndef predict():\r\n \r\n print('Age Mean=',age_mean)\r\n \r\n \r\n \r\n data = request.form.to_dict()\r\n \r\n age=int(data['age'])\r\n salary=int(data['salary'])\r\n \r\n # Scaling inputs\r\n age_scaled=float((age-age_mean)/age_std)\r\n salary_scaled=float((salary-salary_mean)/salary_std)\r\n \r\n print('Age Scaled: ',age_scaled)\r\n print('Salary Scaled: ',salary_scaled)\r\n\r\n \r\n arr=np.array([[age_scaled,salary_scaled]])\r\n\r\n \r\n pred=model.predict(arr)\r\n \r\n return render_template('index.html', result=pred)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"eslam-yousssef/Social-Network-Adds-ML","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"73208682060","text":"import json\nimport os\nimport pickle\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import pearsonr\nfrom sklearn.dummy import DummyClassifier, DummyRegressor\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom sklearn.linear_model import LassoCV, LogisticRegressionCV\nfrom sklearn.metrics import (\n accuracy_score,\n balanced_accuracy_score,\n mean_squared_error,\n precision_score,\n r2_score,\n 
recall_score,\n)\nfrom sklearn.naive_bayes import ComplementNB, MultinomialNB\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom .data_utils import split_data, subsampling_balance\nfrom .DRM_utils import (\n get_SDRMs,\n get_DRMs_only,\n get_all_DRMs,\n get_accessory,\n get_standalone,\n get_NRTI,\n get_NNRTI,\n get_Other,\n)\n\nHERE = os.path.dirname(__file__)\n\n\nclass NoCrashModel:\n def __init__(self, *args, **kwargs):\n pass\n\n def fit(self, *args, **kwargs):\n pass\n\n def predict(self, X, *args, **kwargs):\n return [\"This model does not exist\"] * len(X)\n\n\nclass FisherTestModel:\n CORRECTIONS = [\"Bonferroni\", \"fdr_bh\", \"fdr_by\"]\n\n def __init__(self, subtype, DRMs, seqs, target, correction, n_vote, alpha=0.05):\n if correction not in FisherTestModel.CORRECTIONS:\n raise ValueError(\n f\"correction must be one of the following: {FisherTestModel.CORRECTIONS}\"\n )\n self.n_vote = n_vote\n self.correction, self.subtype, self.alpha, self.target = (\n correction,\n subtype,\n alpha,\n target,\n )\n self.mutations = self._read_file(correction, subtype, DRMs, seqs, target, alpha)\n self.classes_ = [0, 1]\n\n def _read_file(self, correction, subtype, DRMs, seqs, target, alpha):\n mutations = pd.read_csv(\n os.path.join(HERE, \"data/fisher_p_values.tsv\"), sep=\"\\t\", index_col=0\n )\n subset = mutations[\n (mutations[\"subtype\"] == subtype)\n & (mutations[\"target\"] == target)\n & (mutations[\"DRMs\"].apply(str.upper) == DRMs.upper())\n & (mutations[\"seqs\"].apply(str.upper) == seqs.upper())\n ][[correction]]\n return subset\n\n def fit(self, *args, **kwargs):\n pass\n\n def predict(self, X, *args, **kwargs):\n sign = self.mutations[\n self.mutations[self.correction] <= self.alpha\n ].index.tolist()\n sub = X.filter(sign, axis=1)\n presence = sub.sum(axis=1)\n return (presence >= self.n_vote).astype(int)\n\n\nclass FisherBonf1(FisherTestModel):\n def __init__(self, subtype, DRMs, seqs, target, alpha=0.05):\n super().__init__(subtype, DRMs, seqs, target, \"Bonferroni\", 1, alpha)\n\n\nclass FisherBonf2(FisherTestModel):\n def __init__(self, subtype, DRMs, seqs, target, alpha=0.05):\n super().__init__(subtype, DRMs, seqs, target, \"Bonferroni\", 2, alpha)\n\n\nclass FisherBH1(FisherTestModel):\n def __init__(self, subtype, DRMs, seqs, target, alpha=0.05):\n super().__init__(subtype, DRMs, seqs, target, \"fdr_bh\", 1, alpha)\n\n\nclass FisherBH2(FisherTestModel):\n def __init__(self, subtype, DRMs, seqs, target, alpha=0.05):\n super().__init__(subtype, DRMs, seqs, target, \"fdr_bh\", 2, alpha)\n\n\nclass DRMClassifier:\n choices = {\n \"SDRM\": get_SDRMs,\n \"DRM\": get_DRMs_only,\n \"ALL\": get_all_DRMs,\n \"ACCESSORY\": get_accessory,\n \"STANDALONE\": get_standalone,\n \"NRTI\": get_NRTI,\n \"NNRTI\": get_NNRTI,\n \"OTHER\": get_Other,\n }\n\n def __init__(self, type_, votes):\n self.votes = votes\n self.classes_ = [0, 1]\n getter = DRMClassifier.choices.get(type_)\n if getter is None:\n raise ValueError(\n f\"wrong mutation class, must be one of: {DRMClassifier.choices}\"\n )\n self.mutations = getter()\n\n def fit(self, *args, **kwargs):\n pass\n\n def predict(self, X, *args, **kwargs):\n return (X.filter(self.mutations, axis=1).sum(axis=1) >= self.votes).astype(int)\n\n\nclass DRMs1(DRMClassifier):\n def __init__(self):\n super().__init__(\"ALL\", 1)\n\n\nclass DRMs2(DRMClassifier):\n def __init__(self):\n super().__init__(\"ALL\", 2)\n\n\nclass SDRMs1(DRMClassifier):\n def __init__(self):\n super().__init__(\"SDRM\", 1)\n\n\nclass SDRMs2(DRMClassifier):\n def __init__(self):\n 
super().__init__(\"SDRM\", 2)\n\n\nSTAT_MODELS = {\n \"FisherBonf1\": FisherBonf1,\n \"FisherBonf2\": FisherBonf2,\n \"FisherBH1\": FisherBH1,\n \"FisherBH2\": FisherBH2,\n}\n\nDRM_MODELS = {\n \"DRMs1\": DRMs1,\n \"DRMs2\": DRMs2,\n \"SDRMs1\": SDRMs1,\n \"SDRMs2\": SDRMs2,\n}\n\nREGRESSION_TARGETS = [\n \"ABC\",\n \"AZT\",\n \"FTC\",\n \"3TC\",\n \"TDF\",\n \"DOR\",\n \"EFV\",\n \"ETR\",\n \"NVP\",\n \"RPV\",\n]\n\nCLASSIFICATION_MODELS = {\n \"RF\": RandomForestClassifier,\n \"Logistic\": LogisticRegressionCV,\n \"KNN\": KNeighborsClassifier,\n \"Bayes\": MultinomialNB,\n \"Complement\": ComplementNB,\n \"FisherBonf1\": FisherBonf1,\n \"FisherBonf2\": FisherBonf2,\n \"FisherBH1\": FisherBH1,\n \"FisherBH2\": FisherBH2,\n \"DRMs1\": DRMs1,\n \"DRMs2\": DRMs2,\n \"SDRMs1\": SDRMs1,\n \"SDRMs2\": SDRMs2,\n}\n\nREGRESSION_MODELS = {\"RF\": RandomForestRegressor, \"Lasso\": LassoCV}\n\n\ndef get_sets(split_path, target):\n train, test = pickle.load(open(split_path, \"rb\"))\n return split_data(train, target), split_data(test, target)\n\n\ndef pair_measure_with_features(features, measures):\n paired = {}\n for feature, measure in zip(features, measures):\n paired[feature] = measure\n return paired\n\n\ndef read_data(data):\n if isinstance(data, str):\n return pd.read_csv(data, sep=\"\\t\", index_col=0, header=0)\n return data\n\n\ndef train_model(\n model_type, train_set, params_path, target, subtype, DRMs, seqs, balance=False\n):\n\n if target in REGRESSION_TARGETS:\n target = f\"{target}_Score\"\n models = REGRESSION_MODELS\n else:\n models = CLASSIFICATION_MODELS\n\n if model_type in [\"Bayes\", \"Complement\"]:\n params = dict()\n elif model_type in STAT_MODELS.keys():\n params = {\"subtype\": subtype, \"DRMs\": DRMs, \"seqs\": seqs, \"target\": target}\n elif isinstance(params_path, dict):\n params = params_path\n else:\n params = json.load(open(params_path, \"r\"))\n\n data = read_data(train_set)\n\n if balance:\n data = subsampling_balance(data, target)\n\n X_train, y_train = split_data(data, target)\n\n model = models.get(model_type, NoCrashModel)(**params)\n model.fit(X_train, y_train)\n\n coefs = get_coefficients(model, X_train.columns.tolist())\n\n return model, coefs\n\n\ndef get_predictions(model, test_set, target, balance=False):\n\n data = read_data(test_set)\n\n if target in REGRESSION_TARGETS:\n target = f\"{target}_Score\"\n\n if balance:\n data = subsampling_balance(data, target)\n\n X_test, y_test = split_data(data, target)\n\n predictions = model.predict(X_test)\n\n if getattr(model, \"predict_proba\", None):\n probabilities = model.predict_proba(X_test)\n df = pd.DataFrame(probabilities, columns=model.classes_)\n df.set_index(X_test.index, inplace=True)\n df[\"pred\"] = predictions\n df[\"real\"] = y_test\n else:\n df = pd.DataFrame(predictions, columns=[\"pred\"], index=X_test.index)\n df[\"real\"] = y_test\n\n return df\n\n\ndef get_coefficients(model, features):\n index = model.classes_ if len(model.classes_) > 2 else model.classes_[-1:]\n if isinstance(model, (RandomForestClassifier, RandomForestRegressor)):\n coefs = [model.feature_importances_] * len(index)\n elif np.array([isinstance(model, clf) for clf in STAT_MODELS.values()]).any():\n coefs = model.mutations.copy()\n coefs[\"pos\"] = 1 - coefs.iloc[:, 0]\n coefs.columns = [0, 1]\n return coefs.transpose()\n elif np.array([isinstance(model, clf) for clf in DRM_MODELS.values()]).any():\n coefs = pd.DataFrame([[0, 0]] * len(features), index=features, columns=[0, 1])\n coefs.filter(model.mutations, axis=0).loc[:, 
1] = 1\n return coefs.transpose()\n else:\n coefs = model.coef_\n return pd.DataFrame(coefs, index=index, columns=features)\n","repo_name":"lucblassel/utils_hiv","sub_path":"utils_hiv/utils/learning_utils.py","file_name":"learning_utils.py","file_ext":"py","file_size_in_byte":8198,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"34496157729","text":"N = int(input())\r\ndata = [0]\r\nfor _ in range(N):\r\n data.append(int(input()))\r\ndp = [0] * 301\r\ndp[0] = data[0]\r\ndp[1] = data[0] + data[1]\r\nfor i in range(2, len(data)):\r\n dp[i] = data[i] + max(data[i - 1] + dp[i - 3], dp[i - 2])\r\nprint(dp[N])","repo_name":"dltkdcks456/Study-Practice","sub_path":"백준/Silver/2579. 계단 오르기/계단 오르기.py","file_name":"계단 오르기.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"8292162951","text":"#reference to master doc\ndef addSheetToMaster():\n scoresheet_link = input(\"Input link to scoresheet to add to DB\")\n\n return\n\ndef undoEdit():\n \n return\n\ndef main():\n config = open(\"config.txt\", \"r\")\n master_sheet = config.readline()\n organization = config.readline()\n if (master_sheet == \"\" or organization == \"\"):\n print(\"Please edit config to add necessary data:\\n\" +\n \" Organization name \\n\" +\n \" Link to master sheet\")\n mode = input(\"Please select mode:\\n\"+\n \"1. add new sheet to master\\n\" +\n \"2. undo last modification\\n\" +\n \"3. edit config\\n\" +\n \"4. close application\")\n\n \n if (mode == 1):\n addSheetToMaster()\n elif (mode == 2):\n undoEdit()\n","repo_name":"op23n1/UAQuizBowlStatTrack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"19296874850","text":"class Node():\r\n def __init__(self,val):\r\n self.info = val\r\n self.link = None\r\n\r\n \r\nclass Singly_linked():\r\n def __init__(self):\r\n self.head = None\r\n\r\n def display(self):\r\n if self.head is None:\r\n return\r\n else:\r\n p = self.head\r\n while p:\r\n print(p.info)\r\n p = p.link\r\n\r\n def insert_at_end(self,data):\r\n if self.head is None:\r\n self.head = Node(data)\r\n else:\r\n p = self.head\r\n while p.link:\r\n p = p.link\r\n p.link = Node(data)\r\n \r\n\r\n def create_Node(self):\r\n n = int(input(\"Enter how many nodes you want to create\"))\r\n if n<=0:\r\n return\r\n else:\r\n for i in range(n):\r\n data = int(input(\"Enter the Node\"))\r\n self.insert_at_end(data)\r\n \r\n\r\nobj = Singly_linked()\r\nobj.create_Node()\r\nobj.display()\r\n","repo_name":"dj5353/Data-Structures-using-python","sub_path":"CodeChef/middle_of_linked_list.py","file_name":"middle_of_linked_list.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"31704696162","text":"\r\n# Take a string and find unique character in that string\r\n# Unique character = Which occurs only once in the string\r\n\r\ns = str(input('\\nEnter : '))\r\n\r\ncharOrder = []\r\ncnt = {}\r\n\r\nfor c in s:\r\n if c in cnt:\r\n cnt[c]+=1\r\n else:\r\n cnt[c] = 1\r\n charOrder.append(c)\r\nprint('The unique character(s) is/are :', end='')\r\nfor c in charOrder:\r\n if cnt[c]==1:\r\n print(c, end=', 
')","repo_name":"SubrataG99/BasicPythonCodes","sub_path":"UniqueChar.py","file_name":"UniqueChar.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"11203326534","text":"# List the business_id , full address and categories of the Top 10 businesses using the average ratings.\n# This will require you to use review.csv and business.csv files.\n# Sample output:\n# business id full address categories avg rating\n# xdf12344444444, CA 91711 List['Local Services', 'Carpet Cleaning']\t5.0\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\n\nif __name__ == \"__main__\":\n\n conf = SparkConf().setMaster(\"local\").setAppName(\"q4\")\n sc = SparkContext(conf=conf)\n\n review = sc.textFile(\"review.csv\").map(lambda line: line.split(\"::\"))\n business = sc.textFile(\"business.csv\").map(lambda line: line.split(\"::\"))\n\n review_by_business = review.map(lambda x: (x[2], x[3]))\n business_cleaned = business.map(lambda x:(x[0], (x[1], x[2])))\n\n join_rdd=business_cleaned.join(review_by_business)\n\n x=join_rdd.map(lambda x: ((x[0],x[1][0]), x[1][1])).mapValues(lambda x:(1,float(x[0]))).reduceByKey(lambda a,b: (a[0]+b[0],a[1]+b[1])).mapValues(lambda x:x[1]/x[0])\n\n top_10 = x.top(10, key=lambda x: x[1])\n for y in top_10:\n print(\"{} {} {} {}\".format(y[0][0],y[0][1][0],y[0][1][1],y[1]))\n\n # x.map(lambda y: \"{} {} {} {}\".format(y[0][0],y[0][1][0],y[0][1][1],y[1])).saveAsTextFile(\"q4.txt\")","repo_name":"poojakudav309/Big-Data-Mutual-Friends-Yelp-Dataset-using-pyspark-and-spark-sql","sub_path":"q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"32084889998","text":"from bs4 import BeautifulSoup as bs\nfrom urllib.request import urlopen\nimport json\nfrom collections import OrderedDict\nimport re\n\ncore = \"https://www.kamailio.org/wiki/cookbooks/5.1.x/core\"\npseudovariables = \"https://www.kamailio.org/wiki/cookbooks/5.1.x/pseudovariables\"\n\n\ndef idx_from_class(x):\n return int(x.attrs[\"class\"].split(\"sectionedit\")[1])\n\nurl = pseudovariables\n\nwith_arg = r\"(\\$\\w*\\()(\\w*)(\\))\\w*\"\n\nhtml = urlopen(url).read().decode('utf-8')\nsoup = bs(html, \"lxml-xml\")\n\nkeywords_start_from = 0 \n\nh2 = soup.find_all(\"h2\", class_ = lambda cls: cls.startswith(\"sectionedit\"))\ncategories_ = list(map(lambda x: (x.text, idx_from_class(x)), h2))\ncategories_ = { idx:c for c,idx in categories_ }\ncategories = OrderedDict(sorted(categories_.items()))\n\nkeywords = soup.find_all(\"h3\", class_ = lambda cls: cls.startswith(\"sectionedit\"))\ndescriptions = soup.find_all(\"div\", class_ = \"level3\")\n\nif len(keywords) != len(descriptions):\n raise Exception(\"scraping error, entry number mismatch!\")\n\nto_json =[]\n\ndef get_category(idx):\n latest = None\n for cat_idx in categories.keys():\n if cat_idx < idx:\n latest = cat_idx\n continue\n else:\n return categories[latest]\n return \"\"\n\n\nfor keyword_entry in zip(keywords[keywords_start_from:], descriptions[keywords_start_from:]):\n keyword = keyword_entry[0].text\n desc = keyword_entry[1].text\n category = get_category(idx_from_class(keyword_entry[0]))\n\n try:\n tok = keyword.split(\"-\")\n if len(tok) > 1:\n instruction = tok[0].rstrip()\n name = tok[1]\n else:\n instruction = name = keyword\n \n as_json = {\n \"text\": instruction,\n \"displayText\": instruction, \n \"description\": name + 
\"[\" + category + \"]\\n\" + desc,\n \"descriptionMoreURL\": url,\n \"type\": \"variable\",\n \"rightLabel\": name\n }\n \n if re.match(with_arg, keyword) is not None:\n as_json[\"snippet\"] = re.sub(with_arg, \"\\g<1>${1:\\g<2>}\\g<3>\", instruction)\n \n to_json.append(as_json)\n except Exception as err:\n print(\"skipping: \" + keyword + \" cause:\" + str(err))\n\n \nout = \"/home/mvenditto/Scaricati/kamailio_5_1_x_pseudovars.json\"\nwith open(out, \"w+\") as json_out:\n json_out.write(json.dumps(to_json, indent=4))\n\n","repo_name":"mvenditto/autocomplete-kamailio","sub_path":"script/kamadocs_pseudovars.py","file_name":"kamadocs_pseudovars.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"32204853389","text":"# -*- coding: utf-8 -*-\n\n__title__ = 'transliterate.tests'\n__author__ = 'Artur Barseghyan'\n__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'\n__license__ = 'GPL 2.0/LGPL 2.1'\n__all__ = ('TransliterateTest',)\n\nimport unittest\nimport six\nfrom six import print_\n\nfrom transliterate.discover import autodiscover\nfrom transliterate.conf import set_setting, get_setting, reset_to_defaults_settings\nfrom transliterate import defaults\n#from transliterate.utils import get_available_language_codes, translit, detect_language, slugify\nfrom transliterate import get_available_language_codes, translit, detect_language, slugify\n#from transliterate.utils import get_available_language_packs\nfrom transliterate import get_available_language_packs\nfrom transliterate.decorators import transliterate_function, transliterate_method\nfrom transliterate.base import TranslitLanguagePack, registry\n\nfrom transliterate.contrib.apps.translipsum import TranslipsumGenerator\n\nPRINT_INFO = True\nTRACK_TIME = False\n\ndef print_info(func):\n \"\"\"\n Prints some useful info.\n \"\"\"\n if not PRINT_INFO:\n return func\n\n def inner(self, *args, **kwargs):\n if TRACK_TIME:\n import simple_timer\n timer = simple_timer.Timer() # Start timer\n\n result = func(self, *args, **kwargs)\n\n if TRACK_TIME:\n timer.stop() # Stop timer\n\n print_('\\n{0}'.format(func.__name__))\n print_('============================')\n print_('\"\"\" {0} \"\"\"'.format(func.__doc__.strip()))\n print_('----------------------------')\n if result is not None:\n try:\n print_(result)\n except Exception as e:\n print_(result.encode('utf8'))\n\n if TRACK_TIME:\n print_('done in {0} seconds'.format(timer.duration))\n\n return result\n return inner\n\n\ndef py2only(func):\n \"\"\"\n Skips the test on Python 3.\n \"\"\"\n if not six.PY3:\n return func\n\n def dummy(self, *args, **kwargs):\n pass\n\n return dummy\n\n\nclass TransliterateTest(unittest.TestCase):\n \"\"\"\n Tests of ``transliterate.utils.translit``.\n \"\"\"\n def setUp(self):\n self.latin_text = u\"Lorem ipsum dolor sit amet\"\n self.armenian_text = u'Լօրեմ իպսում դօլօր սիտ ամետ'\n self.cyrillic_text = u'Лорем ипсум долор сит амет'\n self.ukrainian_cyrillic_text = u'Лорем іпсум долор сіт амет'\n self.georgian_text = u'Ⴊორემ იფსუმ დოლორ სით ამეთ'\n self.greek_text = u'Λορεμ ιψθμ δολορ σιτ αμετ'\n self.hebrew_text = u'Lורeמ יpסuמ דולור סית אמeת'\n #reset_to_defaults_settings()\n\n @print_info\n def test_01_get_available_language_codes(self):\n \"\"\"\n Test ``autodiscover`` and ``get_available_language_codes``.\n \"\"\"\n res = get_available_language_codes()\n res.sort()\n c = ['el', 'hy', 'ka', 'ru', 'uk'] #'he',\n c.sort()\n self.assertEqual(res, c)\n return 
res\n\n @print_info\n def test_02_translit_latin_to_armenian(self):\n \"\"\"\n Test transliteration from Latin to Armenian.\n \"\"\"\n res = translit(self.latin_text, 'hy')\n self.assertEqual(res, self.armenian_text)\n return res\n\n @print_info\n def test_03_translit_latin_to_georgian(self):\n \"\"\"\n Test transliteration from Latin to Georgian.\n \"\"\"\n res = translit(self.latin_text, 'ka')\n self.assertEqual(res, self.georgian_text)\n return res\n\n @print_info\n def test_04_translit_latin_to_greek(self):\n \"\"\"\n Test transliteration from Latin to Greek.\n \"\"\"\n res = translit(self.latin_text, 'el')\n self.assertEqual(res, self.greek_text)\n return res\n\n @print_info\n def __test_05_translit_latin_to_hebrew(self):\n \"\"\"\n Test transliteration from Latin to Hebrew.\n \"\"\"\n res = translit(self.latin_text, 'he')\n self.assertEqual(res, self.hebrew_text)\n return res\n\n @print_info\n def test_06_translit_latin_to_cyrillic(self):\n \"\"\"\n Test transliteration from Latin to Cyrillic.\n \"\"\"\n res = translit(self.latin_text, 'ru')\n self.assertEqual(res, self.cyrillic_text)\n return res\n\n @print_info\n def test_06_translit_latin_to_ukrainian_cyrillic(self):\n \"\"\"\n Test transliteration from Latin to Ukrainian Cyrillic.\n \"\"\"\n res = translit(self.latin_text, 'uk')\n self.assertEqual(res, self.ukrainian_cyrillic_text)\n return res\n\n @print_info\n def test_07_translit_armenian_to_latin(self):\n \"\"\"\n Test transliteration from Armenian to Latin.\n \"\"\"\n res = translit(self.armenian_text, 'hy', reversed=True)\n self.assertEqual(res, self.latin_text)\n return res\n\n @print_info\n def test_08_translit_georgian_to_latin(self):\n \"\"\"\n Test transliteration from Georgian to Latin.\n \"\"\"\n res = translit(self.georgian_text, 'ka', reversed=True)\n self.assertEqual(res, self.latin_text)\n return res\n\n @print_info\n def test_09_translit_greek_to_latin(self):\n \"\"\"\n Test transliteration from Greek to Latin.\n \"\"\"\n res = translit(self.greek_text, 'el', reversed=True)\n self.assertEqual(res, self.latin_text)\n return res\n\n @print_info\n def __test_10_translit_hebrew_to_latin(self):\n \"\"\"\n Test transliteration from Hebrew to Latin.\n \"\"\"\n res = translit(self.hebrew_text, 'he', reversed=True)\n self.assertEqual(res, self.latin_text)\n return res\n\n @print_info\n def test_11_translit_cyrillic_to_latin(self):\n \"\"\"\n Test transliteration from Cyrillic to Latin.\n \"\"\"\n res = translit(self.cyrillic_text, 'ru', reversed=True)\n self.assertEqual(res, self.latin_text)\n return res\n\n @print_info\n def test_11_translit_ukrainian_cyrillic_to_latin(self):\n \"\"\"\n Test transliteration from Ukrainian Cyrillic to Latin.\n \"\"\"\n res = translit(self.ukrainian_cyrillic_text, 'uk', reversed=True)\n self.assertEqual(res, self.latin_text)\n return res\n\n @print_info\n def test_12_function_decorator(self):\n \"\"\"\n Testing the function decorator from Latin to Armenian.\n \"\"\"\n @transliterate_function(language_code='hy')\n def decorator_test_armenian(text):\n return text\n\n res = decorator_test_armenian(self.latin_text)\n self.assertEqual(res, self.armenian_text)\n\n @print_info\n def test_13_method_decorator(self):\n \"\"\"\n Testing the method decorator from Latin to Cyrillic.\n \"\"\"\n class DecoratorTest(object):\n @transliterate_method(language_code='ru')\n def decorator_test_russian(self, text):\n return text\n\n res = DecoratorTest().decorator_test_russian(self.latin_text)\n self.assertEqual(res, self.cyrillic_text)\n return res\n\n 
@print_info\n def test_14_function_decorator(self):\n \"\"\"\n Testing the function decorator (reversed) from Armenian to Latin.\n \"\"\"\n @transliterate_function(language_code='hy', reversed=True)\n def decorator_test_armenian_reversed(text):\n return text\n\n res = decorator_test_armenian_reversed(self.armenian_text)\n self.assertEqual(res, self.latin_text)\n return res\n\n @print_info\n def test_15_register_custom_language_pack(self):\n \"\"\"\n Testing registering of a custom language pack.\n \"\"\"\n class ExampleLanguagePack(TranslitLanguagePack):\n \"\"\"\n Example language pack.\n \"\"\"\n language_code = \"example\"\n language_name = \"Example\"\n mapping = (\n u\"abcdefghij\",\n u\"1234567890\",\n )\n\n registry.register(ExampleLanguagePack)\n\n assert 'example' in get_available_language_codes()\n res = translit(self.latin_text, 'example')\n self.assertEqual(res, 'Lor5m 9psum 4olor s9t 1m5t')\n return res\n\n #@py2only\n @print_info\n def test_16_translipsum_generator_armenian(self):\n \"\"\"\n Testing the translipsum generator. Generating lorem ipsum paragraphs in Armenian.\n \"\"\"\n g_am = TranslipsumGenerator(language_code='hy')\n res = g_am.generate_paragraph()\n assert res\n return res\n\n #@py2only\n @print_info\n def test_17_translipsum_generator_georgian(self):\n \"\"\"\n Testing the translipsum generator. Generating lorem ipsum sentence in Georgian.\n \"\"\"\n g_ge = TranslipsumGenerator(language_code='ka')\n res = g_ge.generate_sentence()\n assert res\n return res\n\n #@py2only\n @print_info\n def test_18_translipsum_generator_greek(self):\n \"\"\"\n Testing the translipsum generator. Generating lorem ipsum sentence in Greek.\n \"\"\"\n g_el = TranslipsumGenerator(language_code='el')\n res = g_el.generate_sentence()\n assert res\n return res\n\n #@py2only\n @print_info\n def __test_19_translipsum_generator_hebrew(self):\n \"\"\"\n Testing the translipsum generator. Generating lorem ipsum sentence in Hebrew.\n \"\"\"\n g_he = TranslipsumGenerator(language_code='he')\n res = g_he.generate_sentence()\n assert res\n return res\n\n #@py2only\n @print_info\n def test_20_translipsum_generator_cyrillic(self):\n \"\"\"\n Testing the translipsum generator. Generating lorem ipsum sentence in Cyrillic.\n \"\"\"\n g_ru = TranslipsumGenerator(language_code='ru')\n res = g_ru.generate_sentence()\n assert res\n return res\n\n @print_info\n def test_20_translipsum_generator_ukrainian_cyrillic(self):\n \"\"\"\n Testing the translipsum generator. Generating lorem ipsum sentence in Ukrainian Cyrillic.\n \"\"\"\n g_uk = TranslipsumGenerator(language_code='uk')\n res = g_uk.generate_sentence()\n assert res\n return res\n\n @print_info\n def test_21_language_detection_armenian(self):\n \"\"\"\n Testing language detection. Detecting Armenian.\n \"\"\"\n res = detect_language(self.armenian_text)\n self.assertEqual(res, 'hy')\n return res\n\n @print_info\n def test_22_language_detection_georgian(self):\n \"\"\"\n Testing language detection. Detecting Georgian.\n \"\"\"\n res = detect_language(self.georgian_text)\n self.assertEqual(res, 'ka')\n return res\n\n @print_info\n def test_23_language_detection_greek(self):\n \"\"\"\n Testing language detection. Detecting Greek.\n \"\"\"\n #set_setting('DEBUG', True)\n res = detect_language(self.greek_text)\n #reset_to_defaults_settings()\n self.assertEqual(res, 'el')\n return res\n\n @print_info\n def __test_24_language_detection_hebrew(self):\n \"\"\"\n Testing language detection. 
Detecting Hebrew.\n \"\"\"\n res = detect_language(self.hebrew_text)\n self.assertEqual(res, 'he')\n return res\n\n @print_info\n def test_25_language_detection_cyrillic(self):\n \"\"\"\n Testing language detection. Detecting Russian (Cyrillic).\n \"\"\"\n res = detect_language(self.cyrillic_text)\n self.assertEqual(res, 'ru')\n return res\n\n @print_info\n def __test_25_language_detection_ukrainian_cyrillic(self):\n \"\"\"\n Testing language detection. Detecting Ukrainian (Cyrillic).\n \"\"\"\n res = detect_language(self.ukrainian_cyrillic_text)\n self.assertEqual(res, 'uk')\n return res\n\n @print_info\n def test_26_slugify_armenian(self):\n \"\"\"\n Testing slugify from Armenian.\n \"\"\"\n res = slugify(self.armenian_text)\n self.assertEqual(res, 'lorem-ipsum-dolor-sit-amet')\n return res\n\n @print_info\n def test_27_slugify_georgian(self):\n \"\"\"\n Testing slugify from Georgian.\n \"\"\"\n res = slugify(self.georgian_text)\n self.assertEqual(res, 'lorem-ipsum-dolor-sit-amet')\n return res\n\n @print_info\n def test_28_slugify_greek(self):\n \"\"\"\n Testing slugify from Greek.\n \"\"\"\n res = slugify(self.greek_text)\n self.assertEqual(res, 'lorem-ipsum-dolor-sit-amet')\n return res\n\n @print_info\n def __test_29_slugify_hebrew(self):\n \"\"\"\n Testing slugify from Hebrew.\n \"\"\"\n res = slugify(self.hebrew_text)\n self.assertEqual(res, 'lorem-ipsum-dolor-sit-amet')\n return res\n\n @print_info\n def test_30_slugify_cyrillic(self):\n \"\"\"\n Testing slugify from Cyrillic.\n \"\"\"\n res = slugify(self.cyrillic_text)\n self.assertEqual(res, 'lorem-ipsum-dolor-sit-amet')\n return res\n\n @print_info\n def test_30_slugify_ukrainian_cyrillic(self):\n \"\"\"\n Testing slugify from Ukrainian Cyrillic.\n \"\"\"\n res = slugify(self.ukrainian_cyrillic_text, language_code='uk')\n self.assertEqual(res, 'lorem-ipsum-dolor-sit-amet')\n return res\n\n @print_info\n def test_31_override_settings(self):\n \"\"\"\n Testing settings override.\n \"\"\"\n def override_settings():\n return get_setting('LANGUAGE_DETECTION_MAX_NUM_KEYWORDS')\n\n self.assertEqual(defaults.LANGUAGE_DETECTION_MAX_NUM_KEYWORDS, override_settings())\n\n set_setting('LANGUAGE_DETECTION_MAX_NUM_KEYWORDS', 10)\n\n self.assertEqual(10, override_settings())\n\n return override_settings()\n\n @print_info\n def test_32_auto_translit_reversed(self):\n \"\"\"\n Test automatic reversed translit (from target script to source script) for Armenian, Georgian, Greek\n and Russian (Cyrillic).\n \"\"\"\n res = []\n texts = [\n self.armenian_text,\n self.georgian_text,\n self.greek_text,\n #self.hebrew_text,\n self.cyrillic_text\n ]\n\n for text in texts:\n r = translit(text, reversed=True)\n self.assertEqual(r, self.latin_text)\n res.append(r)\n\n return res\n\n @print_info\n def test_33_register_unregister(self):\n \"\"\"\n Testing register/unregister.\n \"\"\"\n from transliterate.contrib.languages.hy.translit_language_pack import ArmenianLanguagePack\n\n class A(TranslitLanguagePack):\n language_code = \"ru\"\n language_name = \"Example\"\n mapping = (\n u\"abcdefghij\",\n u\"1234567890\",\n )\n # Since key `ru` already exists in the registry it can't be replaced (without force-register).\n res = registry.register(A)\n self.assertTrue(not res)\n\n # Now with force-register it can.\n res = registry.register(A, force=True)\n self.assertTrue(res)\n\n # Once we have it there and it's forced, we can't register another.\n res = registry.register(A, force=True)\n self.assertTrue(not res)\n\n # Unregister non-forced language pack.\n res = 
registry.unregister(ArmenianLanguagePack)\n self.assertTrue(res and not ArmenianLanguagePack.language_code in get_available_language_codes())\n\n res = registry.unregister(A)\n self.assertTrue(not res and A.language_code in get_available_language_codes())\n\n @print_info\n def __test_29_mappings(self):\n \"\"\"\n Testing mappings.\n \"\"\"\n for language_pack in get_available_language_packs():\n print_('Testing language pack {0} {1}'.format(language_pack.language_code, language_pack.language_name))\n print_('Reversed test:')\n for letter in language_pack.mapping[1]:\n print_(letter, ' --> ', translit(letter, language_pack.language_code, reversed=True))\n\n print_('Normal test:')\n for letter in language_pack.mapping[0]:\n print_(letter, ' --> ', translit(letter, language_pack.language_code))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"weezel/ITIS13","sub_path":"transliterate/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":16334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"13247002923","text":"\"\"\"\n The barrier module, implemented by the ASC team\n and taken from the lab\n\"\"\"\nimport sys\nimport threading\nfrom threading import *\n\n\"\"\"\n Ugly comments are ugly - do not use long comments on the same line as\n the code; they are here only for 'teaching purpose'\n\"\"\"\n\nclass SimpleBarrier():\n \"\"\" Non-reentrant barrier, implemented using a semaphore \"\"\"\n\n def __init__(self, num_threads):\n self.num_threads = num_threads\n self.count_threads = self.num_threads\n self.counter_lock = Lock() # protects the decrement of the thread count\n self.threads_sem = Semaphore(0) # counts the threads\n\n def wait(self):\n \"\"\"\n Called by the threads to wait until all of them reach this point.\n Once they have all arrived, they unblock and continue execution.\n \"\"\"\n with self.counter_lock:\n self.num_threads -= 1\n if self.num_threads == 0: # the last thread has reached the barrier\n for i in range(self.count_threads):\n self.threads_sem.release() # the semaphore counter becomes count_threads\n self.threads_sem.acquire() # n-1 threads block here\n # the semaphore counter is decremented count_threads times\n\nclass ReusableBarrierCond():\n \"\"\" Reentrant barrier, implemented using a condition variable \"\"\"\n\n def __init__(self, num_threads):\n self.num_threads = num_threads\n self.count_threads = self.num_threads\n self.cond = Condition(Lock())\n\n def wait(self):\n self.cond.acquire() # enter the critical section\n self.count_threads -= 1\n if self.count_threads == 0:\n self.cond.notify_all() # wakes up all the threads; they can re-enter the critical section after release\n self.count_threads = self.num_threads\n else:\n self.cond.wait() # leaves the critical section and blocks; when woken up it re-acquires the lock\n self.cond.release() # leave the critical section\n\nclass ReusableBarrierSem():\n \"\"\" Reentrant barrier, implemented using semaphores \"\"\"\n\n def __init__(self, num_threads):\n self.num_threads = num_threads\n self.count_threads1 = self.num_threads\n self.count_threads2 = self.num_threads\n\n self.counter_lock = Lock() # protects the decrement of the thread counts\n self.threads_sem1 = Semaphore(0) # counts the threads for the first phase\n self.threads_sem2 = Semaphore(0) # counts the threads for the second phase\n\n def wait(self):\n self.phase1()\n self.phase2()\n\n def phase1(self):\n 
with self.counter_lock:\n self.count_threads1 -= 1\n if self.count_threads1 == 0:\n for i in range(self.num_threads):\n self.threads_sem1.release()\n self.count_threads2 = self.num_threads\n\n self.threads_sem1.acquire()\n\n def phase2(self):\n with self.counter_lock:\n self.count_threads2 -= 1\n if self.count_threads2 == 0:\n for i in range(self.num_threads):\n self.threads_sem2.release()\n self.count_threads1 = self.num_threads\n\n self.threads_sem2.acquire()\n\n","repo_name":"andrei-datcu/-homework-ASC","sub_path":"tema1/barrier.py","file_name":"barrier.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"36390818876","text":"import unittest\nfrom ostoskori import Ostoskori\nfrom tuote import Tuote\nfrom ostos import Ostos\n\nclass TestOstoskori(unittest.TestCase):\n def setUp(self):\n self.kori = Ostoskori()\n\n def test_ostoskorin_hinta_ja_tavaroiden_maara_alussa(self):\n self.assertEqual(self.kori.hinta(), 0)\n self.assertEqual(self.kori.tavaroita_korissa(), 0)\n \n def test_yhden_tuotteen_lisaamisen_jalkeen_korissa_yksi_tavara(self):\n maito = Tuote(\"Maito\", 3)\n\n self.kori.lisaa_tuote(maito)\n exp_result = 1\n self.assertEqual(exp_result, len(self.kori.ostokset()))\n \n def test_yhden_tuotteen_lisaamisen_jalkeen_korin_hinta_tuotteen_hinta(self):\n maito = Tuote(\"Maito\", 3)\n\n self.kori.lisaa_tuote(maito)\n\n self.assertEqual(3, self.kori.hinta())\n \n \n def test_kahden_eri_tuotteen_lisaamisen_jalkeen_korissa_on_kaksi_tavaraa(self):\n maito = Tuote(\"Maito\", 3)\n leipa = Tuote(\"Leipä\", 2)\n\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(leipa)\n \n self.assertEqual(2, len(self.kori.ostokset()))\n \n def test_kahden_eri_tuotteen_lisaamisen_jalkeen_korin_hinta_tuotteiden_hinta(self):\n maito = Tuote(\"Maito\", 3)\n leipa = Tuote(\"Leipä\", 2)\n\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(leipa)\n\n self.assertEqual(5, self.kori.hinta())\n \n def test_kahden_saman_tuotteen_lisaamisen_jalkeen_korin_hinta_2_kertaa_tuotteen_hinta(self):\n maito = Tuote(\"Maito\", 3)\n\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(maito)\n self.assertEqual(6, self.kori.hinta())\n \n def test_yhden_tuotteen_lisaamisen_jalkeen_korissa_yksi_ostosolio(self):\n maito = Tuote(\"Maito\", 3)\n\n\n self.kori.lisaa_tuote(maito)\n exp_result = 1\n self.assertEqual(exp_result, len(self.kori.ostokset()))\n \n self.assertIsInstance(self.kori.ostokset()[0], Ostos)\n\n # Step 9\n def test_yhden_tuotteen_lisaamisen_jalkeen_korissa_yksi_ostosolio_jolla_oikea_tuotteen_nimi_ja_maara(self):\n maito = Tuote(\"Maito\", 3)\n self.kori.lisaa_tuote(maito)\n \n ostos = self.kori.ostokset()[0]\n\n self.assertEqual(\"Maito\", ostos.tuotteen_nimi())\n self.assertEqual(1, ostos.lukumaara())\n \n def test_kahden_eri_tuotteen_lisaamisen_jalkeen_korissa_2_ostosta(self):\n maito = Tuote(\"Maito\", 3)\n leipa = Tuote(\"Leipä\", 2)\n\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(leipa)\n\n ostos1 = self.kori.ostokset()[0]\n ostos2 = self.kori.ostokset()[1]\n\n self.assertIsInstance(ostos1, Ostos)\n self.assertIsInstance(ostos2, Ostos)\n\n # Step 11\n def test_kahden_saman_tuotteen_lisaamisen_jalkeen_ostoskori_sisaltaa_yhden_ostoksen(self):\n maito = Tuote(\"Maito\", 3)\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(maito)\n\n self.assertEqual(1, len(self.kori.ostokset()))\n\n ostos1 = self.kori.ostokset()[0]\n\n # Step 12\n def 
test_kahden_saman_tuotteen_lisaamisen_jalkeen_ostoskori_sisaltaa_tuotteen_samalla_nimella_ja_lukumaaralla_2(self):\n maito = Tuote(\"Maito\", 3)\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(maito)\n\n ostos1 = self.kori.ostokset()[0]\n\n self.assertEqual(2, ostos1.lukumaara())\n self.assertEqual(maito.nimi(), ostos1.tuotteen_nimi())\n \n # step 13\n def test_toinen_samoista_tuotteista_poistetaan_korista(self):\n maito = Tuote(\"Maito\", 3)\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(maito)\n\n self.kori.poista_tuote(maito)\n\n ostos1 = self.kori.ostokset()[0]\n self.assertEqual(1, ostos1.lukumaara())\n \n # Step 14\n def test_koriin_lisatty_tuote_ja_poistetaan_kori_tyhjenee(self):\n maito = Tuote(\"Maito\", 3)\n self.kori.lisaa_tuote(maito)\n \n self.kori.poista_tuote(maito)\n self.assertTrue(len(self.kori.ostokset())==0)\n \n def test_tyhjenna_kori(self):\n maito = Tuote(\"Maito\", 3)\n leipa = Tuote(\"Leipä\", 2)\n\n self.kori.lisaa_tuote(maito)\n self.kori.lisaa_tuote(leipa)\n self.kori.lisaa_tuote(leipa)\n\n self.kori.tyhjenna()\n\n self.assertTrue(len(self.kori.ostokset()) == 0)\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"attkauppi/ohtu-2021","sub_path":"viikko4/tdd-ostoskori/src/tests/ostoskori_test.py","file_name":"ostoskori_test.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"86333608060","text":"#!/usr/bin/env python3\nimport odrive\nfrom odrive.enums import *\n\nfrom UDPComms import Subscriber, Publisher, timeout\nimport time\n\nimport os\nif os.geteuid() != 0:\n exit(\"You need to have root privileges to run this script.\\nPlease try again, this time using 'sudo'. Exiting.\")\n\ncmd = Subscriber(8830, timeout = 0.3)\ntelemetry = Publisher(8810)\n\nprint(\"finding odrives...\")\n\nmiddle_odrive = odrive.find_any(serial_number=\"206230804648\")\nprint(\"found middle odrive\")\nfront_odrive = odrive.find_any(serial_number=\"206C35733948\")\nprint(\"found front odrive\")\nback_odrive = odrive.find_any(serial_number=\"207D35903948\")\nprint(\"found back odrive\")\n\nprint(\"found all odrives\")\n\n\ndef clear_errors(odrive):\n if odrive.axis0.error:\n print(\"axis 0\", odrive.axis0.error)\n odrive.axis0.error = 0\n if odrive.axis1.error:\n print(\"axis 1\", odrive.axis1.error)\n odrive.axis1.error = 0\n\n if odrive.axis0.motor.error:\n print(\"motor 0\", odrive.axis0.motor.error)\n odrive.axis0.motor.error = 0\n if odrive.axis1.motor.error:\n print(\"motor 1\", odrive.axis1.motor.error)\n odrive.axis1.motor.error = 0\n\n if odrive.axis0.encoder.error:\n print(\"encoder 0\", odrive.axis0.encoder.error)\n odrive.axis0.encoder.error = 0\n if odrive.axis1.encoder.error:\n print(\"encoder 1\", odrive.axis1.encoder.error)\n odrive.axis1.encoder.error = 0\n\ndef send_state(odrive, state):\n try:\n odrive.axis0.requested_state = state\n except:\n pass\n try:\n odrive.axis1.requested_state = state\n except:\n pass\n \n\nsend_state(front_odrive, AXIS_STATE_IDLE)\nsend_state(middle_odrive, AXIS_STATE_IDLE)\nsend_state(back_odrive, AXIS_STATE_IDLE)\n\n#v_vain = .05\n#v_int_gain = .1\n#odrive_array = [front_odrive,middle_odrive,back_odrive]\n#for odrive in odrive_array:\nfront_odrive.axis0.controller.config.vel_gain = .2\nfront_odrive.axis1.controller.config.vel_gain = .2\nmiddle_odrive.axis0.controller.config.vel_gain = .2\nmiddle_odrive.axis1.controller.config.vel_gain = .2\nback_odrive.axis0.controller.config.vel_gain = 
.2\nback_odrive.axis1.controller.config.vel_gain = .2\n#print front_odrive.axis0.controller.config\nwhile True:\n try:\n msg = cmd.get()\n print(msg)\n\n try:\n telemetry.send( [middle_odrive.vbus_voltage,\n front_odrive.axis0.motor.current_control.Iq_measured,\n front_odrive.axis1.motor.current_control.Iq_measured,\n middle_odrive.axis0.motor.current_control.Iq_measured,\n middle_odrive.axis1.motor.current_control.Iq_measured,\n back_odrive.axis0.motor.current_control.Iq_measured,\n back_odrive.axis1.motor.current_control.Iq_measured] )\n except:\n pass\n\n clear_errors(front_odrive)\n clear_errors(middle_odrive)\n clear_errors(back_odrive)\n\n if (msg['t'] == 0 and msg['f'] == 0):\n send_state(front_odrive, AXIS_STATE_IDLE)\n send_state(middle_odrive, AXIS_STATE_IDLE)\n send_state(back_odrive, AXIS_STATE_IDLE)\n else:\n middle_odrive.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\n middle_odrive.axis1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\n front_odrive.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\n front_odrive.axis1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\n back_odrive.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\n back_odrive.axis1.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL\n\n middle_odrive.axis0.controller.vel_setpoint = msg['f'] + msg['t']\n middle_odrive.axis1.controller.vel_setpoint = msg['f'] - msg['t']\n front_odrive.axis0.controller.vel_setpoint = -msg['f'] - msg['t']\n front_odrive.axis1.controller.vel_setpoint = msg['f'] - msg['t']\n\n front_odrive.axis0.watchdog_feed()\n front_odrive.axis1.watchdog_feed()\n middle_odrive.axis0.watchdog_feed()\n middle_odrive.axis1.watchdog_feed()\n back_odrive.axis0.watchdog_feed()\n back_odrive.axis1.watchdog_feed()\n\n # back odrive is reversed left to right\n back_odrive.axis0.controller.vel_setpoint = -msg['f'] - msg['t'] \n back_odrive.axis1.controller.vel_setpoint = msg['f'] - msg['t']\n\n except timeout:\n print(\"Sending safe command\")\n send_state(front_odrive, AXIS_STATE_IDLE)\n send_state(middle_odrive, AXIS_STATE_IDLE)\n send_state(back_odrive, AXIS_STATE_IDLE)\n middle_odrive.axis0.controller.vel_setpoint = 0\n middle_odrive.axis1.controller.vel_setpoint = 0\n front_odrive.axis0.controller.vel_setpoint = 0\n front_odrive.axis1.controller.vel_setpoint = 0\n back_odrive.axis0.controller.vel_setpoint = 0\n back_odrive.axis1.controller.vel_setpoint = 0\n except:\n print(\"shutting down\")\n send_state(front_odrive, AXIS_STATE_IDLE)\n send_state(middle_odrive, AXIS_STATE_IDLE)\n send_state(back_odrive, AXIS_STATE_IDLE)\n middle_odrive.axis0.controller.vel_setpoint = 0\n middle_odrive.axis1.controller.vel_setpoint = 0\n front_odrive.axis0.controller.vel_setpoint = 0\n front_odrive.axis1.controller.vel_setpoint = 0\n back_odrive.axis0.controller.vel_setpoint = 0\n back_odrive.axis1.controller.vel_setpoint = 0\n raise\n\n\n# finally:\n# print(\"Fianlly shutting down\")\n# send_state(front_odrive, AXIS_STATE_IDLE)\n# send_state(middle_odrive, AXIS_STATE_IDLE)\n# send_state(back_odrive, AXIS_STATE_IDLE)\n# middle_odrive.axis0.controller.vel_setpoint = 0\n# middle_odrive.axis1.controller.vel_setpoint = 0\n# front_odrive.axis0.controller.vel_setpoint = 0\n# front_odrive.axis1.controller.vel_setpoint = 0\n# back_odrive.axis0.controller.vel_setpoint = 0\n# back_odrive.axis1.controller.vel_setpoint = 
0\n\n","repo_name":"stanfordroboticsclub/RoverODrive","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6129,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"46"} +{"seq_id":"29992489636","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport urllib\n\npath = os.getcwd() #печать текущей директории\nprint(path)\nURL = input('enter site address: ') #http://www.ya.ru\nfolder = input('enter name of folder: ')\nwhile True:\n try:\n os.mkdir(folder)\n except:\n print('already exist')\n folder = input('enter other name: ')\n else:\n break\n\n\npage = requests.get(URL)\nsoup = BeautifulSoup(page.content, 'html.parser')\n\nresults = soup.find_all('img', src=re.compile('\\/[a-z\\-\\d\\/]+.jpg'))\nprint(results)\n\narr=[]\nfor i in range(0, len(results)):\n arr.append(results[i]['src'])\n img_data = requests.get(URL + arr[i]).content\n handler = open(folder+'/'+str(i)+'.jpg', 'wb')\n handler.write(img_data)\n handler.close()\n\n\n\n\n# site = open('index.html')\n# content = site.read()\n#\n# soup = BeautifulSoup(content, 'html.parser')\n#\n# print(soup.title.string)\n# print(soup.find_all('p'))","repo_name":"v910423/Python-Ozon","sub_path":"parcer/Lesson_7_parcer.py","file_name":"Lesson_7_parcer.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"21719244002","text":"import os,sys,argparse,glob,re,bisect\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nimport operator\nimport matplotlib\n# matplotlib.use('Agg')\n# import matplotlib.pyplot as plt\n# #matplotlib.rcParams['agg.path.chunksize'] = 10000\n# matplotlib.rcParams['font.size']=16\n# import seaborn as sns\n# sns.set(font_scale=1.2)\n# sns.set_style(\"whitegrid\", {'axes.grid' : False})\n# import scipy\n# import scipy.optimize\n# sns.set_style(\"ticks\")\n# matplotlib.rcParams[\"font.sans-serif\"] = [\"Arial\"]\n\nfrom scipy.stats import gamma\n\n\nproject_dir='/Volumes/zanglab/zw5j/since2019_projects/phase_separation_FEpiTR/f12_KS_test_Rename'\n# project_dir='/nv/vol190/zanglab/zw5j/since2019_projects/phase_separation_FEpiTR/'\n\n# ==== read TFMS CP\ninfile = '{}/f1_TF_cluster_potential/f2_cor_CP_SE_AICAP/data/TFMS_CP_SE_enrich.csv'.format(project_dir)\ndf1 = pd.read_csv('../f1_TF_cluster_potential/f2_cor_CP_SE_AICAP/data//TFMS_CP_SE_enrich.csv',index_col=0)\ndf1 = df1[['#TFMS','len-of-TFMS','log10-dis ks_2samp-s signed','motif SE overlapped','enrich-at-SE-fisher-exact-s', 'enrich-at-SE-fisher-exact-p']]\ndf1.columns = ['#TFMS','length of TFMS','TFMS CP','#TFMS overlap SE','SE enrichment odds ratio', 'SE enrichment pvalue']\n\n# ==== read Gamma k by p-value\ninfile = '{}/f5_gamma_fit/f4_TFMS_gamma_alpha_by_pvalue/TFMS_gamma_alpha_combined.csv'.format(project_dir)\ndf2 = pd.read_csv(infile,index_col=0)\n# df2 = df2[['alpha','scale',\n# '#TFMS p5','alpha p5','scale p5',\n# '#TFMS p6','alpha p6','scale p6',\n# '#TFMS p7','alpha p7','scale p7',]]\n# df2.columns = ['Gamma k','Gamma theta',\n# '#TFMS p<1e-5','Gamma k p<1e-5','Gamma theta p<1e-5',\n# '#TFMS p<1e-6','Gamma k p<1e-6','Gamma theta p<1e-6',\n# '#TFMS p<1e-7','Gamma k p<1e-7','Gamma theta p<1e-7',]\n\ndf2 = df2[['alpha','scale',\n 'alpha p5','alpha p6','alpha p7']]\ndf2.columns = ['Gamma k','Gamma theta',\n 'Gamma k p<1e-5','Gamma k p<1e-6','Gamma k p<1e-7',]\n \n# ==== read Gamma k by number\ninfile = 
'{}/f5_gamma_fit/f6_TFMS_gamma_alpha_by_num/TFMS_gamma_alpha_combined.csv'.format(project_dir)\ndf3 = pd.read_csv(infile,index_col=0)\n# df3 = df3[['#TFMS top2k','alpha top2k','scale top2k',\n# '#TFMS top5k','alpha top5k','scale top5k',\n# '#TFMS top10k','alpha top10k','scale top10k',\n# '#TFMS top20k','alpha top20k','scale top20k',\n# '#TFMS top50k','alpha top50k','scale top50k',\n# '#TFMS top100k','alpha top100k','scale top100k',]]\n \n# df3.columns = ['#TFMS top2k','Gamma k top2k','Gamma theta top2k',\n# '#TFMS top5k','Gamma k top5k','Gamma theta top5k',\n# '#TFMS top10k','Gamma k top10k','Gamma theta top10k',\n# '#TFMS top20k','Gamma k top20k','Gamma theta top20k',\n# '#TFMS top50k','Gamma k top50k','Gamma theta top50k',\n# '#TFMS top100k','Gamma k top100k','Gamma theta top100k',] \n\ndf3 = df3[['alpha top2k','alpha top5k','alpha top10k',\n 'alpha top20k','alpha top50k','alpha top100k']]\n \ndf3.columns = ['Gamma k top2k','Gamma k top5k','Gamma k top10k',\n 'Gamma k top20k','Gamma k top50k','Gamma k top100k'] \n\n# ==== combine all data\ndf = pd.concat([df1,df2,df3],axis=1)\ndf = df.sort_values(by='TFMS CP',ascending=False)\n\nwriter = pd.ExcelWriter('data/TFMS_CP_Gamma_fit.xlsx')\ndf.to_excel(writer)\nwriter.close()\n \n\n\n","repo_name":"zhenjiawang157/transcriptional_condensates","sub_path":"f12_KS_test_Rename/fz_data_organization/TFMS_CP_gamma_fit.py","file_name":"TFMS_CP_gamma_fit.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"21524227138","text":"from dash import Dash, html, dcc, Input, Output, State, dash_table\r\nfrom sklearn import linear_model\r\nimport plotly.express as px\r\nfrom datetime import datetime, timedelta\r\nfrom plotly.subplots import make_subplots\r\nimport random\r\nimport plotly.graph_objects as go\r\nfrom utils import *\r\nimport warnings\r\nimport pickle\r\nimport numpy as np\r\nfrom matplotlib.colors import LinearSegmentedColormap\r\n\r\nwarnings.filterwarnings('ignore')\r\n\r\napp = Dash(__name__)\r\n\r\n# Comment the next line and uncomment the 3 after to do your tests to avoid loading time, but the nans might fail your\r\n# tests\r\n# df, variables_each_country = get_preprocessed_df()\r\n# url = 'https://covid.ourworldindata.org/data/owid-covid-data.csv'\r\n# df = pd.read_csv(url)\r\n# variables_each_country = get_var_each_country()\r\n\r\nwith open('df.pickle', 'rb') as dffile:\r\n df, variables_each_country = pickle.load(dffile)\r\n\r\ntemp_df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})\r\n\r\nfifa_df = get_fifa_data(df)\r\n\r\n\r\ndef fifa_plot(df):\r\n fig = px.scatter(df, x='total_cases_rank', y='fifa_rank',\r\n hover_name='country_abrv',\r\n hover_data=['fifa_rank', 'total_cases_rank']\r\n )\r\n\r\n fig.update_traces(marker_color='#000000')\r\n\r\n min_dim = df[['fifa_rank', 'total_cases_rank']].max().idxmax()\r\n maxi = df[min_dim].max()\r\n for i, row in df.iterrows():\r\n country_iso = row['iso_2']\r\n fig.add_layout_image(\r\n dict(\r\n source=f\"https://raw.githubusercontent.com/matahombres/CSS-Country-Flags-Rounded/master/flags/{country_iso}.png\",\r\n xref=\"x\",\r\n yref=\"y\",\r\n xanchor=\"center\",\r\n yanchor=\"middle\",\r\n x=row[\"total_cases_rank\"],\r\n y=row[\"fifa_rank\"],\r\n sizex=np.sqrt(row[\"total_cases\"] / df[\"total_cases\"].max()) * maxi * 0.025 + maxi * 0.03,\r\n sizey=np.sqrt(row[\"total_cases\"] / df[\"total_cases\"].max()) * maxi * 0.025 + maxi * 0.03,\r\n sizing=\"contain\",\r\n opacity=0.95,\r\n 
layer=\"above\"\r\n )\r\n )\r\n\r\n fig.update_layout(\r\n title_text=\"COVID cases on 09-04-2020 vs Fifa World Ranking for the same date\",\r\n height=600, width=1000, plot_bgcolor=\"#FFFFFF\")\r\n\r\n # Set y-axes titles\r\n fig.update_yaxes(title_text=\"Fifa Rank\", showgrid=True,\r\n griddash='dash', gridcolor='#D4D4D4')\r\n fig.update_xaxes(title_text=\"COVID Cases Rank\", showgrid=True,\r\n griddash='dash', gridcolor='#D4D4D4')\r\n\r\n return fig\r\n\r\n\r\nall_col = list(df.columns)\r\nfor col in columns_to_remove:\r\n all_col.remove(col)\r\nfor col in columns_fixed:\r\n if col in all_col:\r\n all_col.remove(col)\r\n\r\noriginal_df = df\r\nconstraint_added = []\r\n# none_all_col = columns_fixed.copy()\r\n# none_all_col.insert(0, 'None')\r\n\r\nvariables_first_country = variables_each_country[df['location'][0]]\r\n\r\nmonths_list = get_list_months(df)\r\nslider_months = [month[:3] + month[5:] for month in months_list]\r\nmonths_df = get_month_df(df)\r\n\r\ntrust_df = pd.read_csv('share-who-trust-government.csv')\r\ntrust_df = trust_df.drop(['Code', 'Year'], axis=1)\r\ntrust_df.columns = ['location', 'trust_in_gov']\r\nfor countr in df['location'].unique():\r\n if countr not in list(trust_df['location']):\r\n trust_df.loc[len(trust_df)] = [countr, float(\"nan\")]\r\n\r\ncol_fixed_new_df = columns_fixed.copy()\r\ncol_fixed_new_df.insert(0, 'trust_in_gov')\r\n\r\nfiltering_dict = info_filtering(df)\r\n\r\napp.layout = html.Div([\r\n dcc.Store(data=df.to_json(date_format='iso', orient='split'), id='df'),\r\n dcc.Store(data=months_df.to_json(date_format='iso', orient='split'), id='month-df'),\r\n html.H1('COVID 19: The Data',\r\n style={\r\n 'textAlign': 'center',\r\n 'color': 'black',\r\n 'font_size': '36px'\r\n }),\r\n html.H1(\r\n 'Data filtering',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'black'\r\n }\r\n ),\r\n html.Div([\r\n html.Label(\"Activate filtering\"),\r\n dcc.RadioItems(['Active', 'Reset'], 'Active', id='radio-filtering'),\r\n html.Br(),\r\n html.Div([\r\n dcc.Dropdown(columns_fixed, columns_fixed[0], id='variable-to-filter')\r\n ], style={'width': '39%', 'display': 'inline-block'}),\r\n html.Div([\r\n dcc.Dropdown(['>', '>=', '=', '<', '<='], '>', id='sign-to-filter')\r\n ], style={'width': '20%', 'display': 'inline-block'}),\r\n html.Div([\r\n dcc.Input(id='num-to-filter', type='number', value=0),\r\n ], style={'width': '39%', 'display': 'inline-block'}),\r\n html.Br(),\r\n html.Button(id='filtering-button', n_clicks=0, children='Filter'),\r\n html.Div(id='times-clicked')\r\n ], style={'width': '48%', 'display': 'inline-block'}),\r\n html.Div([\r\n dash_table.DataTable(id='filter-table')\r\n ], style={'width': '48%', 'display': 'inline-block', 'float': 'right'}),\r\n html.Br(),\r\n html.Div([html.H1('A look at the world:'),\r\n dcc.Dropdown(df.columns, 'total_cases', id='chorplethdropdown'),\r\n dcc.Graph(id='Choropleth Map'),\r\n dcc.Slider(\r\n 0,\r\n len(months_list) - 1,\r\n marks={i: str(slider_months[i]) for i in range(len(slider_months))},\r\n updatemode='mouseup',\r\n value=0,\r\n id='monthchoroplethmap'\r\n )\r\n ]),\r\n html.Br(),\r\n html.H1(\r\n 'Evolution of multiple variables in time',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'black'\r\n }\r\n ),\r\n html.Div([\r\n html.Label(\"Country or continent\"),\r\n dcc.Dropdown([country for country in df['location'].unique()], df['location'][0],\r\n id='country-continent-choice'),\r\n\r\n html.Br(),\r\n html.Label(\"Variables to plot (max 5)\"),\r\n dcc.Dropdown(variables_first_country, 
variables_first_country[0], id='y-axis', multi=True)\r\n ]),\r\n\r\n dcc.Graph(id='variables-graph'),\r\n\r\n html.Br(),\r\n html.H1(\r\n 'Correlation of variables',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'black'\r\n }\r\n ),\r\n html.H3(\r\n 'Correlations over time',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'blue'\r\n }\r\n ),\r\n html.Div([\r\n html.Div([\r\n html.Label('Country or continent'),\r\n dcc.Dropdown([country for country in df['location'].unique()], df['location'][0], id='country-choice')\r\n ], style={'width': '48%', 'float': 'left', 'display': 'inline-block'}),\r\n ]),\r\n html.Div([\r\n dash_table.DataTable(id='corr-table-not-cumu')\r\n ]),\r\n html.Br(),\r\n html.H3(\r\n 'Correlations cumulative with fixed variables',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'blue'\r\n }\r\n ),\r\n html.Div([\r\n dash_table.DataTable(id='corr-table-cumu')\r\n ]),\r\n\r\n html.Br(),\r\n html.H1(\r\n 'Variables dependencies for all countries',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'black'\r\n }\r\n ),\r\n html.Div([\r\n html.Div([\r\n html.Label('x-axis'),\r\n dcc.Dropdown(all_col, all_col[0], id='x-axis-dependence')\r\n ], style={'width': '30%', 'float': 'left', 'display': 'inline-block'}),\r\n html.Div([\r\n html.Label('y-axis'),\r\n dcc.Dropdown(all_col, all_col[1], id='y-axis-dependence'),\r\n ], style={'width': '30%', 'display': 'inline-block'}),\r\n html.Div([\r\n html.Label('Size of the dots'),\r\n dcc.Dropdown(col_fixed_new_df, col_fixed_new_df[0], id='size-dot-dependence'),\r\n ], style={'width': '30%', 'display': 'inline-block'}),\r\n ], style={'margin-bottom': '0.5cm'}),\r\n dcc.Graph(id='total-dependence-graph'),\r\n dcc.Slider(\r\n 0,\r\n len(months_list) - 1,\r\n marks={i: str(slider_months[i]) for i in range(len(slider_months))},\r\n updatemode='mouseup',\r\n value=10,\r\n id='month-slider-dependence'\r\n ),\r\n\r\n html.Br(),\r\n html.H1(\r\n 'Predictions for the next 3 months',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'black'\r\n }\r\n ),\r\n html.Div([\r\n html.Label(\"Country or continent\"),\r\n dcc.Dropdown([country for country in df['location'].unique()], df['location'][0],\r\n id='country-predictions'),\r\n\r\n html.Br(),\r\n html.Label(\"Variables to predict\"),\r\n dcc.Dropdown(variables_first_country, variables_first_country[0], id='var-to-pred')\r\n ]),\r\n dcc.Graph(id='predictions-graph'),\r\n html.Br(),\r\n html.H1('A Story of COVID Through Unconventional Data',\r\n style={\r\n 'textAlign': 'left',\r\n 'color': 'black'\r\n }\r\n ),\r\n html.H3('The beginning of COVID: did football fans contribute to spreading COVID?'),\r\n dcc.Graph(figure=fifa_plot(fifa_df))\r\n])\r\n\r\n\r\n#####################\r\n# Filtering\r\n@app.callback(\r\n Output('filter-table', 'data'),\r\n Output('filter-table', 'columns'),\r\n Input('variable-to-filter', 'value')\r\n)\r\ndef update_info_filtering(variable):\r\n info_used = filtering_dict[variable]\r\n dict_info = {'variables': list(info_used.keys()),\r\n 'value': list(info_used.values())}\r\n\r\n info_df = pd.DataFrame(dict_info)\r\n info_df.set_index('variables')\r\n update_columns = [{\"name\": i, \"id\": i, \"selectable\": False} for i in info_df.columns]\r\n return info_df.to_dict('records'), update_columns\r\n\r\n\r\n@app.callback(\r\n Output('times-clicked', 'children'),\r\n Output('filtering-button', 'n_clicks'),\r\n Output('df', 'data'),\r\n Output('month-df', 'data'),\r\n Input('radio-filtering', 'value'),\r\n Input('filtering-button', 'n_clicks'),\r\n 
State('variable-to-filter', 'value'),\r\n State('sign-to-filter', 'value'),\r\n State('num-to-filter', 'value'),\r\n State('df', 'data'),\r\n State('month-df', 'data'),\r\n)\r\ndef filtering(radio_activate, number_conditions_added, var_filter, sign_filter, num_filter, df_stored, month_df_stored):\r\n new_df = pd.read_json(df_stored, orient='split')\r\n new_df['date'] = new_df['date'].dt.strftime('%Y-%m-%d')\r\n new_month_df = pd.read_json(month_df_stored, orient='split')\r\n new_month_df['date'] = new_month_df['date'].dt.strftime('%Y-%m-%d')\r\n if radio_activate == 'Reset':\r\n # constraint_added.clear()\r\n string = u'0 conditions added'\r\n times_clicked = 0\r\n new_df = original_df\r\n new_month_df = get_month_df(original_df)\r\n\r\n elif var_filter == 'None':\r\n string = u'{} conditions added'.format(max([0, number_conditions_added - 1]))\r\n times_clicked = max([0, number_conditions_added - 1])\r\n else:\r\n # constraint_added.append([var_filter, sign_filter, num_filter])\r\n new_df = apply_constraints(new_df, [var_filter, sign_filter, num_filter])\r\n new_month_df = get_month_df(new_df)\r\n string = u'{} conditions added'.format(number_conditions_added)\r\n times_clicked = number_conditions_added\r\n return string, times_clicked, new_df.to_json(date_format='iso', orient='split'), new_month_df.to_json(\r\n date_format='iso', orient='split')\r\n\r\n\r\n#######################\r\n# Multi variables\r\n@app.callback(\r\n Output('country-continent-choice', 'options'),\r\n Output('country-continent-choice', 'value'),\r\n Input('df', 'data'))\r\ndef change_available_countries_mult(data):\r\n used_df = pd.read_json(data, orient='split')\r\n\r\n all_countries = used_df['location'].unique()\r\n return all_countries, all_countries[0]\r\n\r\n\r\n@app.callback(\r\n Output('y-axis', 'options'),\r\n Input('country-continent-choice', 'value'))\r\ndef y_axis_based_on_location(country_cont_choice):\r\n variables_to_show = variables_each_country[country_cont_choice]\r\n for col in columns_to_remove:\r\n if col in variables_to_show:\r\n variables_to_show.remove(col)\r\n return variables_to_show\r\n\r\n\r\n@app.callback(\r\n Output('y-axis', 'value'),\r\n Input('y-axis', 'options'),\r\n Input('y-axis', 'value'))\r\ndef limit_number_choice(options_available, values_chosen):\r\n for val in values_chosen:\r\n if val not in options_available:\r\n return [options_available[0], options_available[1]]\r\n if len(values_chosen) <= 5:\r\n return values_chosen\r\n else:\r\n return values_chosen[:5]\r\n\r\n\r\n@app.callback(\r\n Output('variables-graph', 'figure'),\r\n Input('y-axis', 'value'),\r\n Input('country-continent-choice', 'value'),\r\n Input('df', 'data'))\r\ndef update_graph_multi_var(variables_chosen, country_cont_choice, data):\r\n stored_df = pd.read_json(data, orient='split')\r\n stored_df['date'] = stored_df['date'].dt.strftime('%Y-%m-%d')\r\n used_df = stored_df[stored_df['location'] == country_cont_choice]\r\n\r\n fig = go.Figure()\r\n\r\n dates = used_df['date']\r\n for i in range(len(variables_chosen)):\r\n if i == 0:\r\n fig.add_trace(go.Scatter(\r\n x=dates,\r\n y=used_df[variables_chosen[i]],\r\n name=variables_chosen[i],\r\n ))\r\n else:\r\n fig.add_trace(go.Scatter(\r\n x=dates,\r\n y=used_df[variables_chosen[i]],\r\n name=variables_chosen[i],\r\n yaxis=\"y\" + str(i + 1)\r\n ))\r\n\r\n hex_colors_plotly = ['#636efa', '#ef553b', '#00cc96', '#ac65fa', '#ffa25b']\r\n layout = {}\r\n layout['yaxis'] = {'tickfont': {'color': hex_colors_plotly[0]},\r\n 'title': {'font': {'color': 
hex_colors_plotly[0]}, 'text': variables_chosen[0]}}\r\n layout['xaxis'] = {'domain': [0.3, 0.9]}\r\n\r\n for i in range(1, len(variables_chosen)):\r\n pos = ((i * 1.4) * 0.25 / len(variables_chosen)) - 0.05\r\n layout['yaxis' + str(i + 1)] = {'anchor': 'free', 'position': pos, 'overlaying': 'y', 'side': 'left',\r\n 'tickfont': {'color': hex_colors_plotly[i]},\r\n 'title': {'font': {'color': hex_colors_plotly[i]}, 'text': variables_chosen[i]}}\r\n\r\n fig.update_layout(layout)\r\n fig.update_layout(title='Evolution of the chosen variables over time')\r\n\r\n return fig\r\n\r\n\r\n#######################\r\n# Correlations\r\n@app.callback(\r\n Output('country-choice', 'options'),\r\n Output('country-choice', 'value'),\r\n Input('df', 'data'))\r\ndef change_available_countries_corr(data):\r\n used_df = pd.read_json(data, orient='split')\r\n\r\n all_countries = used_df['location'].unique()\r\n return all_countries, all_countries[0]\r\n\r\n\r\n@app.callback(\r\n Output('corr-table-not-cumu', 'data'),\r\n Output('corr-table-not-cumu', 'columns'),\r\n Input('country-choice', 'value'),\r\n Input('df', 'data'))\r\ndef update_not_cumu_corr(country_choice, data):\r\n stored_df = pd.read_json(data, orient='split')\r\n stored_df['date'] = stored_df['date'].dt.strftime('%Y-%m-%d')\r\n not_cumu_vars = ['new_cases_per_million', 'new_deaths_per_million', 'excess_mortality', 'icu_patients_per_million',\r\n 'hosp_patients_per_million', 'stringency_index', 'reproduction_rate', 'new_tests_per_thousand',\r\n 'positive_rate', 'new_vaccinations']\r\n country_vars = variables_each_country[country_choice]\r\n sorted_vars = []\r\n for var in not_cumu_vars:\r\n if var in country_vars:\r\n sorted_vars.append(var)\r\n not_cumu_vars = sorted_vars\r\n df_not_cumu = stored_df[stored_df['location'] == country_choice][not_cumu_vars]\r\n\r\n corr_mat_not_cumu = df_not_cumu.corr(method='pearson')\r\n\r\n corr_dict = {'variables': corr_mat_not_cumu.index}\r\n for col in corr_mat_not_cumu.columns:\r\n corr_dict[col] = list(corr_mat_not_cumu[col])\r\n\r\n correlation_df = pd.DataFrame(corr_dict)\r\n correlation_df.set_index('variables')\r\n\r\n update_columns = [{\"name\": i, \"id\": i, \"selectable\": False} for i in correlation_df.columns]\r\n\r\n return correlation_df.to_dict('records'), update_columns\r\n\r\n\r\n@app.callback(\r\n Output('corr-table-cumu', 'data'),\r\n Output('corr-table-cumu', 'columns'),\r\n Input('df', 'data'))\r\ndef update_cumu_corr(data):\r\n stored_df = pd.read_json(data, orient='split')\r\n stored_df['date'] = stored_df['date'].dt.strftime('%Y-%m-%d')\r\n\r\n cumulative_vars = ['total_cases_per_million', 'total_deaths_per_million', 'excess_mortality_cumulative_per_million',\r\n 'total_tests_per_thousand', 'total_vaccinations_per_hundred']\r\n total_cumu = cumulative_vars.copy()\r\n for col in columns_fixed:\r\n total_cumu.append(col)\r\n final_df_dict = {i: [] for i in total_cumu}\r\n final_df = pd.DataFrame.from_dict(final_df_dict)\r\n total_cumu.append('iso_code')\r\n df_cumu = stored_df[total_cumu]\r\n prev_iso = df_cumu['iso_code'].iloc[0]\r\n\r\n for i in range(len(df_cumu)):\r\n curr_iso = df_cumu['iso_code'].iloc[i]\r\n if curr_iso != prev_iso:\r\n final_df.loc[len(final_df)] = df_cumu.iloc[i].drop('iso_code')\r\n prev_iso = curr_iso\r\n final_df.loc[len(final_df)] = df_cumu.iloc[len(df_cumu) - 1].drop('iso_code')\r\n corr_mat_cumu = final_df.corr(method='pearson')\r\n corr_mat_cumu = corr_mat_cumu.drop(cumulative_vars, axis=0)\r\n corr_mat_cumu = corr_mat_cumu.drop(columns_fixed, 
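    # Dropping the fixed columns here (after dropping the cumulative rows
    # above) leaves fixed country-level variables as rows and cumulative
    # outcomes as columns: only the cross-correlation block between the two
    # groups is displayed.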
axis=1)\r\n\r\n corr_dict = {'variables': corr_mat_cumu.index}\r\n for col in corr_mat_cumu.columns:\r\n corr_dict[col] = list(corr_mat_cumu[col])\r\n\r\n correlation_df = pd.DataFrame(corr_dict)\r\n correlation_df.set_index('variables')\r\n\r\n update_columns = [{\"name\": i, \"id\": i, \"selectable\": False} for i in correlation_df.columns]\r\n\r\n return correlation_df.to_dict('records'), update_columns\r\n\r\n\r\n#######################\r\n# Dependencies\r\n@app.callback(\r\n Output('total-dependence-graph', 'figure'),\r\n Input('x-axis-dependence', 'value'),\r\n Input('y-axis-dependence', 'value'),\r\n Input('month-slider-dependence', 'value'),\r\n Input('size-dot-dependence', 'value'),\r\n Input('month-df', 'data'))\r\ndef update_dependence_graphs(x_axis_var, y_axis_var, month_slider, size_dot, month_data):\r\n my_df = pd.read_json(month_data, orient='split')\r\n my_df['date'] = my_df['date'].dt.strftime('%Y-%m-%d')\r\n\r\n cont_df = my_df.copy()\r\n current_month = months_list[month_slider]\r\n my_df = my_df.groupby(['iso_code', 'month'], sort=False).mean().reset_index()\r\n my_df = my_df[my_df['iso_code'].str.contains('OWID') == False]\r\n my_df = my_df[my_df['month'] == current_month]\r\n\r\n cont_df = cont_df[cont_df['iso_code'].str.contains('OWID') == False]\r\n cont_df = cont_df[cont_df['month'] == current_month].drop_duplicates('iso_code')\r\n\r\n my_df['continent'] = list(cont_df['continent'])\r\n my_df['location'] = list(cont_df['location'])\r\n trust_df_indexed = trust_df.set_index('location')\r\n trusts = list(trust_df_indexed.loc[list(cont_df['location'])].fillna(1)['trust_in_gov'])\r\n my_df['trust_in_gov'] = trusts\r\n new_df = pd.DataFrame.from_dict(\r\n {'country': list(my_df['location']), 'continent': list(my_df['continent']), x_axis_var: list(my_df[x_axis_var]),\r\n y_axis_var: list(my_df[y_axis_var]), size_dot: list(my_df[size_dot])})\r\n fig = px.scatter(new_df, x=x_axis_var, y=y_axis_var,\r\n size=size_dot, color=\"continent\", hover_name=\"country\",\r\n size_max=18)\r\n return fig\r\n\r\n\r\n########################\r\n# Predictions\r\n@app.callback(\r\n Output('country-predictions', 'options'),\r\n Output('country-predictions', 'value'),\r\n Input('df', 'data'))\r\ndef change_available_countries(data):\r\n used_df = pd.read_json(data, orient='split')\r\n\r\n all_countries = used_df['location'].unique()\r\n return all_countries, all_countries[0]\r\n\r\n\r\n@app.callback(\r\n Output('var-to-pred', 'options'),\r\n Output('var-to-pred', 'value'),\r\n Input('country-predictions', 'value'))\r\ndef var_for_country_pred(country_choice):\r\n variables_to_show = variables_each_country[country_choice]\r\n for col in columns_to_remove:\r\n if col in variables_to_show:\r\n variables_to_show.remove(col)\r\n for col in columns_fixed:\r\n if col in variables_to_show:\r\n variables_to_show.remove(col)\r\n return variables_to_show, variables_to_show[0]\r\n\r\n\r\n@app.callback(\r\n Output('predictions-graph', 'figure'),\r\n Input('country-predictions', 'value'),\r\n Input('var-to-pred', 'value'),\r\n Input('df', 'data'))\r\ndef update_graph7(country_predict, data_to_predict, data):\r\n data_used_for_prediction = ['total_cases', 'new_cases', 'reproduction_rate', 'stringency_index', 'new_tests',\r\n 'positive_rate']\r\n stored_df = pd.read_json(data, orient='split')\r\n stored_df['date'] = stored_df['date'].dt.strftime('%Y-%m-%d')\r\n ##################\r\n # data management\r\n ##################\r\n all_features = variables_each_country[country_predict]\r\n 
updated_data_used_for_pred = []\r\n for var in data_used_for_prediction:\r\n if var in all_features:\r\n updated_data_used_for_pred.append(var)\r\n data_used_for_prediction = updated_data_used_for_pred\r\n all_features_to_predict = data_used_for_prediction.copy()\r\n if data_to_predict not in all_features_to_predict:\r\n all_features_to_predict.append(data_to_predict)\r\n\r\n for column in columns_to_remove:\r\n if column in all_features:\r\n all_features.remove(column)\r\n if column in data_used_for_prediction:\r\n data_used_for_prediction.remove(column)\r\n if column in all_features_to_predict:\r\n all_features_to_predict.remove(column)\r\n\r\n updated_col_fixed = []\r\n for col in columns_fixed:\r\n if col in all_features_to_predict:\r\n all_features_to_predict.remove(col)\r\n updated_col_fixed.append(col)\r\n\r\n updated_data_used_for_pred = []\r\n for var in data_used_for_prediction:\r\n if var in all_features:\r\n updated_data_used_for_pred.append(var)\r\n\r\n data_used_for_prediction = updated_data_used_for_pred\r\n\r\n columns_fixed_ordered = []\r\n for col in all_features:\r\n if col in updated_col_fixed:\r\n columns_fixed_ordered.append(col)\r\n updated_col_fixed = columns_fixed_ordered\r\n\r\n new_data_used = data_used_for_prediction.copy()\r\n for col in data_used_for_prediction:\r\n new_data_used.append(str(col) + \"_1\")\r\n new_data_used.append(str(col) + \"_2\")\r\n new_data_used.append(str(col) + \"_3\")\r\n new_data_used.append(str(col) + \"_4\")\r\n new_data_used.append(str(col) + \"_5\")\r\n new_data_used.append(str(col) + \"_6\")\r\n data_used_for_prediction = new_data_used\r\n train_datas = stored_df[stored_df['location'] == country_predict][all_features_to_predict].reset_index(drop=True)\r\n idx_data_to_pred = all_features_to_predict.index(data_to_predict)\r\n\r\n new_model = linear_model.Lasso(alpha=2, normalize=True, max_iter=10000000)\r\n train_datas_7 = generate_data(training_data=train_datas)\r\n new_model.fit(train_datas_7[data_used_for_prediction].iloc[:-1], train_datas[all_features_to_predict].iloc[7:])\r\n weights = get_weights(data_used_for_prediction, new_model.coef_[:][idx_data_to_pred])\r\n non_zero_weights = []\r\n for key in weights.keys():\r\n if abs(weights[key]) > 0:\r\n non_zero_weights.append(key)\r\n\r\n all_predictions = []\r\n all_dates = []\r\n index_fixed = []\r\n for i in range(len(data_used_for_prediction)):\r\n if data_used_for_prediction[i] in updated_col_fixed:\r\n index_fixed.append(i)\r\n\r\n for i in range(90): # 90 days for 3 months\r\n if i == 0:\r\n last_date = str(df['date'].iloc[-1])\r\n else:\r\n last_date = str(all_dates[-1])\r\n last_datetime = datetime.strptime(last_date, '%Y-%m-%d')\r\n new_datetime = last_datetime + timedelta(days=1)\r\n new_date = str(new_datetime)[:10]\r\n all_dates.append(new_date)\r\n\r\n predicted_data = new_model.predict(train_datas_7[data_used_for_prediction].iloc[-1].to_numpy().reshape(1, -1))\r\n all_predictions.append(predicted_data)\r\n if len(index_fixed) > 0:\r\n for idx in index_fixed:\r\n predicted_data[0].insert(idx, train_datas_7[data_used_for_prediction[i]].iloc[-1])\r\n\r\n new_row = []\r\n for feature in all_features_to_predict:\r\n feature_1 = str(feature) + \"_1\"\r\n new_row.append(train_datas_7[feature_1].iloc[-1])\r\n j = 0\r\n for feature in all_features_to_predict:\r\n feature_2 = str(feature) + \"_2\"\r\n new_row.append(train_datas_7[feature_2].iloc[-1])\r\n feature_3 = str(feature) + \"_3\"\r\n new_row.append(train_datas_7[feature_3].iloc[-1])\r\n feature_4 = str(feature) + 
\"_4\"\r\n new_row.append(train_datas_7[feature_4].iloc[-1])\r\n feature_5 = str(feature) + \"_5\"\r\n new_row.append(train_datas_7[feature_5].iloc[-1])\r\n feature_6 = str(feature) + \"_6\"\r\n new_row.append(train_datas_7[feature_6].iloc[-1])\r\n new_row.append(predicted_data[0][j])\r\n j = j + 1\r\n\r\n train_datas_7.loc[len(train_datas_7)] = new_row\r\n\r\n x = df[df['location'] == country_predict]['date'].tolist()\r\n y = train_datas[data_to_predict].tolist()\r\n for i in range(len(all_dates)):\r\n x.append(all_dates[i])\r\n y.append(all_predictions[i][0][idx_data_to_pred])\r\n\r\n ##################\r\n # plot the predictions\r\n ##################\r\n prediction_df = pd.DataFrame({\"date\": x, \"value\": y})\r\n fig = px.line(prediction_df, x=\"date\", y=\"value\")\r\n\r\n fig.update_yaxes(title=str(data_to_predict + \" predicted for next 3 months\"))\r\n fig.add_vline(x=x[-91], line_width=1, line_color=\"red\")\r\n\r\n return fig\r\n\r\n\r\n@app.callback(\r\n Output('Choropleth Map', 'figure'),\r\n Input('chorplethdropdown', 'value'),\r\n Input('monthchoroplethmap', 'value'), # gives a numerical value\r\n Input('month-df', 'data'))\r\ndef choropleth_map(choroplethdropdown, monthchoroplethmap, month_df_loaded):\r\n my_df = pd.read_json(month_df_loaded, orient='split')\r\n my_df['date'] = my_df['date'].dt.strftime('%Y-%m-%d')\r\n\r\n my_df = my_df.groupby(['iso_code', 'month'], sort=False).mean().reset_index()\r\n my_df = my_df[my_df['iso_code'].str.contains('OWID') == False]\r\n\r\n colorscale = ['#ffd7cd', '#e3ada0', '#c68475', '#a95c4c', '#893427', '#690000']\r\n\r\n current_month = months_list[monthchoroplethmap]\r\n my_df = my_df[my_df['month'] == current_month]\r\n min_color = np.max(my_df[str(choroplethdropdown)])\r\n max_color = np.min(my_df[str(choroplethdropdown)])\r\n\r\n fig = px.choropleth(my_df, locations='iso_code', color=str(choroplethdropdown),\r\n color_continuous_scale=colorscale, hover_name=\"iso_code\", range_color=(min_color, max_color))\r\n\r\n background_color = '#F5F2E8'\r\n\r\n fig.update_layout(font_family='Balto', font_color='#000000',\r\n font_size=18, plot_bgcolor=background_color,\r\n geo=dict(\r\n showframe=False,\r\n showcoastlines=False,\r\n countrycolor='#000000',\r\n bgcolor=background_color,\r\n lakecolor=background_color,\r\n landcolor='rgba(51,17,0,0.2)',\r\n subunitcolor='grey'\r\n\r\n ))\r\n\r\n # Delete antartica\r\n fig.add_trace(go.Choropleth(locations=['ATA'],\r\n z=[0],\r\n colorscale=[[0, background_color], [1, background_color]],\r\n marker_line_color=background_color,\r\n showlegend=False,\r\n showscale=False)\r\n )\r\n\r\n return fig\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run_server(debug=True)\r\n","repo_name":"jlarija/DataVisualisationProject","sub_path":"dashboard_pres.py","file_name":"dashboard_pres.py","file_ext":"py","file_size_in_byte":28215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"36167694137","text":"import pika\n\n\n# 1 建立一个到RabbitMQ服务器的连接,设置指定参数\nconnection=pika.BlockingConnection(pika.ConnectionParameters('localhost',5672))\n\n#\nchannel=connection.channel()\n\n# 2 先创建一个名为hello的队列,若发送消息前队列不存在,rb会抛弃这条消息!\nchannel.queue_declare(queue='hello')\n\n# 3 使用默认交换机(exchange)\nchannel.basic_publish(exchange='',\n routing_key='hello',\n body='Hello world')\n\nprint('已发送')\n# 3 
关闭连接\nconnection.close()","repo_name":"evaseemefly/GridForecastSys","sub_path":"byRabbitMQ/test/producer.py","file_name":"producer.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"206587766","text":"try:\n from PyQt5.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QAction\n from PyQt5.QtGui import QIcon\nexcept ImportError:\n try:\n from PyQt4.QtGui import QApplication, QSystemTrayIcon, QMenu, QAction, QIcon\n except ImportError:\n from PySide.QtGui import QApplication, QSystemTrayIcon, QMenu, QAction, QIcon\nimport sys\n\n\nclass SubMenu(QMenu):\n def __init__(self, *args, label=None, parent=None, **kwargs):\n if label is None:\n QMenu.__init__(self, parent)\n else:\n QMenu.__init__(self, label, parent)\n\n def add_separator(self):\n self.addSeparator()\n\n def add_command(self, label=\"\", command=None):\n action = QAction(label, self)\n if command is not None:\n action.triggered.connect(lambda *args: command())\n self.addAction(action)\n\n def add_cascade(self, label=\"\", menu=None):\n if menu is None:\n menu = SubMenu(label, self)\n action = QAction(label, self)\n action.setMenu(menu)\n self.addAction(action)\n\n def delete(self, item1, item2=None):\n index1 = self.index(item1)\n if item2 is None:\n self.removeAction(self.actions()[index1])\n else:\n index2 = self.index(item2)\n a = self.actions()\n for i in range(index1, index2):\n self.removeAction(a[i])\n\n def index(self, index):\n if isinstance(index, int):\n return index\n elif index == \"end\":\n return len(self.actions())\n else:\n try:\n i = [i.text() for i in self.actions()].index(index)\n except ValueError:\n raise ValueError(\"%r not in menu\" % index)\n return i\n\n def get_item_label(self, item):\n return self.actions()[self.index(item)].text()\n\n def set_item_label(self, item, label):\n i = self.actions()[self.index(item)]\n i.setText(label)\n\n def set_item_menu(self, item, menu):\n i = self.actions()[self.index(item)]\n i.setMenu(menu)\n\n def get_item_menu(self, item):\n i = self.actions()[self.index(item)]\n return i.menu()\n\n def disable_item(self, item):\n self.actions()[self.index(item)].setDisabled(True)\n\n def enable_item(self, item):\n self.actions()[self.index(item)].setDisabled(False)\n\n\nclass TrayIcon(QApplication):\n\n def __init__(self, icon, fallback_icon_path, **kwargs):\n QApplication.__init__(self, sys.argv)\n self._fallback_icon = QIcon(fallback_icon_path)\n self._icon = QIcon.fromTheme(icon, self._fallback_icon)\n self.tray_icon = QSystemTrayIcon()\n self.tray_icon.setIcon(self._icon)\n\n self.menu = SubMenu()\n self.tray_icon.setContextMenu(self.menu)\n self.tray_icon.show()\n\n def loop(self, tk_window):\n self.processEvents()\n tk_window.loop_id = tk_window.after(10, self.loop, tk_window)\n\n def change_icon(self, icon, desc):\n del self._icon\n self._icon = QIcon(icon)\n self.tray_icon.setIcon(self._icon)\n\n def bind_left_click(self, command):\n\n def action(reason):\n \"\"\"Execute command only on click (not when the menu is displayed).\"\"\"\n if reason == QSystemTrayIcon.Trigger:\n command()\n\n self.tray_icon.activated.connect(action)\n","repo_name":"j4321/MyNotes","sub_path":"mynoteslib/trayicon/qticon.py","file_name":"qticon.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"46"} +{"seq_id":"14075845785","text":"import requests\nimport datetime\nimport json\nfrom django.http import 
HttpResponse, JsonResponse\nfrom . import utils\nfrom . import config\n\n\ndef instagram(request):\n if '' == request.GET.get('sight', ''):\n return HttpResponse(status=400)\n\n sight = utils.fromSpaceToUnderLine(request.GET['sight'])\n URL = f\"https://www.instagram.com/explore/tags/{sight}/?__a=1\"\n undistilled = {}\n distilled = {}\n\n try:\n undistilled = requests.get(URL).json(\n )['graphql']['hashtag']['edge_hashtag_to_top_posts']['edges']\n distilled = []\n\n for datum in undistilled:\n distilled.append({\n 'thumbnail': datum['node']['thumbnail_src'],\n 'liked': datum['node']['edge_liked_by']['count'],\n 'published': datetime.date.fromtimestamp(datum['node']['taken_at_timestamp']).isoformat(),\n 'url': f\"https://www.instagram.com/p/{datum['node']['shortcode']}\"\n })\n\n return JsonResponse(\n distilled,\n safe=False,\n json_dumps_params={'ensure_ascii': False}\n )\n except:\n return HttpResponse(status=500)\n\n\ndef weather(request):\n if '' == request.GET.get('country', '') and '' == request.GET.get('city', ''):\n return HttpResponse(status=400)\n\n country = request.GET['country']\n city = request.GET['city']\n URL = f\"http://api.openweathermap.org/data/2.5/weather?q={city},{country}&appid={config.OpenWeatherMap['secret']}\"\n undistilled = {}\n distilled = {}\n\n try:\n undistilled = requests.get(URL).json()\n distilled = {\n 'icon': utils.formatByWeather(undistilled['weather'][0]['main']),\n 'text': undistilled['weather'][0]['description'],\n 'temperature': {\n 'min': utils.formatByTemperature(undistilled['main']['temp_min']),\n 'max': utils.formatByTemperature(undistilled['main']['temp_max']),\n 'now': utils.formatByTemperature(undistilled['main']['temp']),\n },\n 'humidity': undistilled['main']['humidity'],\n 'wind': {\n 'way': utils.formatByCardinal(undistilled['wind']['deg']),\n 'speed': undistilled['wind']['speed'],\n },\n }\n\n return JsonResponse(\n distilled,\n safe=False,\n json_dumps_params={'ensure_ascii': False}\n )\n except:\n return HttpResponse(status=500)\n\n\ndef naver(request):\n if '' == request.GET.get('value', ''):\n return HttpResponse(status=400)\n\n value = request.GET['value']\n URL = f\"https://openapi.naver.com/v1/search/blog.json?query={value}\"\n undistilled = {}\n distilled = {}\n\n try:\n undistilled = requests.get(URL, headers={\n 'X-Naver-Client-Id': config.Naver['appid'],\n 'X-Naver-Client-Secret': config.Naver['secret']\n }).json()['items']\n distilled = []\n\n for datum in undistilled:\n distilled.append({\n 'url': datum['link'],\n 'title': utils.removeTag(datum['title']),\n 'description': utils.removeTag(datum['description']),\n 'published': datetime.datetime.strptime(datum['postdate'], '%Y%m%d').date().isoformat(),\n })\n\n return JsonResponse(\n distilled,\n safe=False,\n json_dumps_params={'ensure_ascii': False}\n )\n except:\n return HttpResponse(status=500)\n","repo_name":"everip/server-api","sub_path":"peristalsis/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"7187629968","text":"# from openpyxl import load_workbook\n\n# wb = load_workbook(filename='/Users/fkl/vr/dataset/test.xlsx')\n# ws = wb.worksheets[0]\n# img = openpyxl.drawing.Image('test.jpg')\n# img.anchor(ws.cell('E1'))\n# ws.add_image(img)\n\n# img = openpyxl.drawing.Image('test.jpg')\n# img.anchor(ws.cell('E2'))\n# ws.add_image(img)\n# wb.save('out.xlsx')\n\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom 
openpyxl.drawing.image import Image\n\nwb = load_workbook(filename='/Users/fkl/vr/dataset/test.xlsx')\nws = wb.worksheets[0]\n\nfor i in range(2, 4456):\n    link = str(ws['D'+str(i)].value)\n    image_name = link.split(\"/\")[-1]\n    image_path = \"/Users/fkl/vr/dataset/web_low_res/\" + image_name\n    print(image_path)\n    img = Image(image_path)\n    ws.add_image(img, 'E'+str(i))\n\n\n# ws.add_image(img1)\n# ws.add_image(img2)\n\n#ws.add_image(img1, 'B2')\n#ws.add_image(img2, 'B14')\n\nwb.save('excel-image.xlsx')\n","repo_name":"Kail-Fu/vrosetta","sub_path":"add_image_to_sheet/addimage.py","file_name":"addimage.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"481815116","text":"\"\"\"public projects\n\nRevision ID: 4b558aa4806\nRevises: 289417c9d06\nCreate Date: 2016-07-08 18:05:57.498826\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '4b558aa4806'\ndown_revision = '289417c9d06'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('project', sa.Column('public', sa.Boolean(), server_default=sa.text('false'), nullable=False))\n    op.create_index(op.f('ix_project_public'), 'project', ['public'], unique=False)\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.drop_index(op.f('ix_project_public'), table_name='project')\n    op.drop_column('project', 'public')\n    ### end Alembic commands ###\n","repo_name":"sprucedev/DockCI","sub_path":"alembic/versions/4b558aa4806_public_projects.py","file_name":"4b558aa4806_public_projects.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"46"} +{"seq_id":"38796749414","text":"# There is a thin rectangular floor of width N and height 2. Tae-il wants to fill this floor using 1 x 2, 2 x 1, and 2 x 2 tiles.\n\n# Find the number of all possible ways to fill the floor.\n\n# For example, there are 5 ways to fill a floor of size 2 x 3.\n\n# Input condition:\n\n# The first line contains N. (1 ≤ N ≤ 1000)\n\n# Output condition:\n\n# On the second line, print the number of ways to fill the 2 x N floor, modulo 796796.\n\n# my answer\nn = int(input())\nd = [0] * 1001\n\nd[1] = 1\nd[2] = 3\n\n\ndef tile(x):\n    if d[x] != 0:\n        return d[x]\n    d[x] = tile(x - 1) + 2 * tile(x - 2)\n    return d[x]\n\n\nprint(\"my answer: \" + str(tile(n) % 796796))\n\n# solution\nd = [0] * 1001\n\nd[1] = 1\nd[2] = 3\nfor i in range(3, n + 1):\n    d[i] = (d[i - 1] + 2 * d[i - 2]) % 796796\n\nprint(\"solution: \" + str(d[n]))\n","repo_name":"JunHCha/Algorithm-Practice","sub_path":"algorithms_in_python/06_dynamic_programing/8-7.py","file_name":"8-7.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"1265027527","text":"'''\nCreated by sunwoong on 2022/12/26\n'''\nfrom collections import defaultdict\n\ndef solution(clothes):\n    table = defaultdict(int)\n    for _, kind in clothes:\n        table[kind] += 1\n    answer = 1\n    for count in table.values():\n        answer *= count + 1\n    return answer - 1","repo_name":"SunwoongH/Algorithm","sub_path":"Programmers/Level2/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"46"} +{"seq_id":"70112996619","text":"from __future__ import absolute_import, division, print_function\nfrom iotbx import pdb\nimport iotbx.phil\nfrom iotbx.option_parser import option_parser\nfrom cctbx import euclidean_model_matching\nfrom cctbx import sgtbx\nfrom scitbx import matrix\nfrom libtbx.utils import Sorry\nfrom libtbx.str_utils import show_string\nfrom libtbx.test_utils import approx_equal\nimport sys, os\n\nmaster_params = iotbx.phil.parse(input_string=\"\"\"\\\nreference {\n  file_name=None\n  .type=path\n  atom_selection=None\n  .type=str\n}\nother {\n  file_name=None\n  .type=path\n  atom_selection=None\n  .type=str\n}\noutput {\n  file_name=None\n  .type=path\n  atom_selection=None\n  .type=str\n}\ncrystal_symmetry {\n  unit_cell=None\n  .type=unit_cell\n  space_group=None\n  .type=space_group\n}\n\"\"\")\n\ndef run(args, command_name=\"iotbx.pdb.superpose_centers_of_mass\"):\n  if (len(args) == 0): args = [\"--help\"]\n  command_line = (option_parser(\n    usage=\n      \"%s [options] [reference_file] [other_file] [parameter_file]\" %\n        command_name)\n    .enable_show_defaults()\n    .enable_symmetry_comprehensive()\n  ).process(args=args)\n  if (command_line.expert_level is not None):\n    master_params.show(\n      expert_level=command_line.expert_level,\n      attributes_level=command_line.attributes_level)\n    sys.exit(0)\n  #\n  # Loop over command-line arguments.\n  #\n  parameter_interpreter = master_params.command_line_argument_interpreter()\n  parsed_params = []\n  pdb_file_names = []\n  command_line_params = []\n  for arg in command_line.args:\n    arg_is_processed = False\n    if (os.path.isfile(arg)):\n      params = None\n      try: params = iotbx.phil.parse(file_name=arg)\n      except KeyboardInterrupt: raise\n      except RuntimeError: pass\n      else:\n        if (len(params.objects) == 0):\n          params = None\n      if (params is not None):\n        parsed_params.append(params)\n        arg_is_processed = True\n    elif (pdb.is_pdb_file(file_name=arg)):\n      pdb_file_names.append(arg)\n      arg_is_processed = True\n    if (not arg_is_processed):\n      try:\n        params = parameter_interpreter.process(arg=arg)\n      except Sorry as e:\n        if (not os.path.isfile(arg)): raise\n        raise Sorry(\"Unknown file format: %s\" % arg)\n      else:\n        command_line_params.append(params)\n  #\n  # Consolidation of inputs, resulting in effective phil_params.\n  #\n  phil_params = 
master_params.fetch(\n sources=parsed_params+command_line_params)\n params = phil_params.extract()\n for param_group in [params.reference, params.other, params.output]:\n if (param_group.file_name is None\n and len(pdb_file_names) > 0):\n param_group.file_name = pdb_file_names[0]\n pdb_file_names = pdb_file_names[1:]\n if (len(pdb_file_names) > 0):\n raise Sorry(\"Too many PDB file names: %s\" % \", \".join([\n show_string(s) for s in pdb_file_names]))\n if (params.output.file_name is None\n and params.other.file_name is not None):\n name = os.path.basename(params.other.file_name)\n if (name.lower().endswith(\".pdb\")): name = name[:-4]\n name += \"_superposed.pdb\"\n params.output.file_name = name\n if (params.crystal_symmetry.unit_cell is None):\n params.crystal_symmetry.unit_cell = \\\n command_line.symmetry.unit_cell()\n if (params.crystal_symmetry.space_group is None):\n params.crystal_symmetry.space_group = \\\n command_line.symmetry.space_group_info()\n phil_params = master_params.format(python_object=params)\n phil_params.show()\n print(\"#phil __OFF__\")\n #\n # Final checks.\n #\n if (params.reference.file_name is None):\n raise Sorry(\"Required file name is missing: reference.file_name\")\n if (params.other.file_name is None):\n raise Sorry(\"Required file name is missing: other.file_name\")\n if (params.output.file_name is None):\n raise Sorry(\"Required file name is missing: output.file_name\")\n #\n # Processing of input PDB files.\n #\n pdb_objs = []\n sites_carts = []\n centers_of_mass = []\n for param_group in [params.reference, params.other]:\n pdb_obj = pdb.input(file_name=param_group.file_name)\n pdb_obj.atoms = pdb_obj.construct_hierarchy().atoms()\n pdb_objs.append(pdb_obj)\n sites_carts.append(pdb_obj.atoms.extract_xyz())\n sites_sel = sites_carts[-1]\n if (param_group.atom_selection is not None):\n sel = pdb_obj.construct_hierarchy().atom_selection_cache().selection(\n param_group.atom_selection)\n sites_sel = sites_sel.select(sel)\n print(\"Number of selected sites:\", sites_sel.size())\n centers_of_mass.append(sites_sel.mean())\n #\n # Consolidation of crystal symmetries.\n #\n crystal_symmetry = command_line.symmetry\n for pdb_obj in pdb_objs:\n crystal_symmetry_from_pdb = pdb_obj.crystal_symmetry()\n if (crystal_symmetry_from_pdb is not None):\n crystal_symmetry = crystal_symmetry.join_symmetry(\n other_symmetry=crystal_symmetry_from_pdb,\n force=False)\n if (crystal_symmetry.unit_cell() is None):\n raise Sorry(\"Unknown unit cell parameters.\"\n \"\\n Use --unit_cell or --symmetry to supply unit cell parameters.\")\n if (crystal_symmetry.space_group_info() is None):\n raise Sorry(\"Unknown space group symmetry.\"\n \"\\n Use --space_group or --symmetry to supply symmetry information.\")\n crystal_symmetry.show_summary()\n #\n # Obtain transformation to reference setting.\n # To ensure all allowed origin shifts are parallel to the basis vectors.\n #\n cb_op_to_ref = crystal_symmetry.change_of_basis_op_to_reference_setting()\n sym_ref = crystal_symmetry.change_basis(cb_op=cb_op_to_ref)\n #\n # Obtain allowed origin shifts.\n # This is the most convenient interface. 
Essentially we just need\n # sgtbx.structure_seminvariants.\n #\n match_symmetry = euclidean_model_matching.euclidean_match_symmetry(\n space_group_info=sym_ref.space_group_info(),\n use_k2l=False,\n use_l2n=False)\n #\n # Compute the symmetry operation which maps the center of mass of\n # \"other\" closest to the center of mass of \"reference.\"\n #\n centers_frac = [\n sym_ref.unit_cell().fractionalize(cb_op_to_ref.c() * center_cart)\n for center_cart in centers_of_mass]\n dist_info = sgtbx.min_sym_equiv_distance_info(\n sym_ref.special_position_settings().sym_equiv_sites(centers_frac[0]),\n centers_frac[1],\n match_symmetry.continuous_shift_flags)\n sym_op = cb_op_to_ref.inverse().apply(dist_info.sym_op())\n print(\"Rotation in fractional space:\", sym_op.r().as_xyz())\n sym_op = sym_op.as_rational().as_float() \\\n + matrix.col(dist_info.continuous_shifts())\n print(\"Translation in fractional space: (%s)\" % (\n \", \".join([\"%.6g\" % t for t in sym_op.t])))\n #\n centers_frac = [sym_ref.unit_cell().fractionalize(center_cart)\n for center_cart in centers_of_mass]\n sym_center_frac = sym_op * centers_frac[1]\n sym_center_cart = crystal_symmetry.unit_cell().orthogonalize(sym_center_frac)\n print(\"Centers of mass:\")\n print(\" Reference: (%s)\" % \", \".join([\"%8.2f\" % v\n for v in centers_of_mass[0]]))\n print(\" Original other: (%s)\" % \", \".join([\"%8.2f\" % v\n for v in centers_of_mass[1]]))\n print(\" Symmetry related other: (%s)\" % \", \".join([\"%8.2f\" % v\n for v in sym_center_cart]))\n print(\"Cartesian distance between centers of mass: %.4f\" % dist_info.dist())\n #\n # Internal consistency check (in input setting).\n #\n assert approx_equal(crystal_symmetry.unit_cell().distance(\n centers_frac[0], sym_center_frac), dist_info.dist())\n #\n # Transform atomic coordinates of \"other.\"\n #\n sites_frac_other = crystal_symmetry.unit_cell().fractionalize(\n sites_cart=sites_carts[1])\n sites_frac_other_superposed = sym_op * sites_frac_other\n sites_cart_other_superposed = crystal_symmetry.unit_cell().orthogonalize(\n sites_frac=sites_frac_other_superposed)\n #\n # Replace original coordinates with transformed coordinates.\n #\n pdb_objs[1].atoms.set_xyz(new_xyz=sites_cart_other_superposed)\n #\n # Write (selected) transformed coordinates.\n #\n pdb_hierarchy = pdb_objs[1].construct_hierarchy()\n if (params.output.atom_selection is not None):\n sel = pdb_hierarchy.atom_selection_cache().selection(\n params.output.atom_selection)\n pdb_hierarchy = pdb_hierarchy.select(atom_selection=sel)\n pdb_hierarchy.write_pdb_file(\n file_name=params.output.file_name,\n crystal_symmetry=crystal_symmetry,\n append_end=True,\n atoms_reset_serial_first_value=1)\n\nif (__name__ == \"__main__\"):\n run(sys.argv[1:])\n","repo_name":"cctbx/cctbx_project","sub_path":"iotbx/command_line/pdb.superpose_centers_of_mass.py","file_name":"pdb.superpose_centers_of_mass.py","file_ext":"py","file_size_in_byte":8409,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"46"} +{"seq_id":"19976769602","text":"import os\nimport socket\n\nPROJECT_PATH = os.path.abspath(os.path.dirname(__file__))\nSITE_ROOT = os.path.dirname(os.path.realpath(__file__))\n\nHOST_NAME = socket.gethostname()\n\n#CACHING\nCACHES = {\n 'default': {\n 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': 'unix:/tmp/memcached.sock',\n 'KEY_PREFIX': 'gameoflife',\n }\n}\n\nEMAIL_LAYOUT = 'mail/base.html'\nSERVER_EMAIL = 'webserver@gameoflife.com'\n\n# Pure 
Pagination\nPAGINATION_SETTINGS = {\n 'PAGE_RANGE_DISPLAYED': 7,\n 'MARGIN_PAGES_DISPLAYED': 2,\n }\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nDEBUG_TOOLBAR_CONFIG = {\n 'INTERCEPT_REDIRECTS': False,\n }\n\nDEBUG_TOOLBAR_PANELS = (\n 'debug_toolbar.panels.version.VersionDebugPanel',\n 'debug_toolbar.panels.timer.TimerDebugPanel',\n 'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',\n 'debug_toolbar.panels.headers.HeaderDebugPanel',\n 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',\n 'debug_toolbar.panels.sql.SQLDebugPanel',\n 'debug_toolbar.panels.template.TemplateDebugPanel',\n 'debug_toolbar.panels.signals.SignalDebugPanel',\n 'debug_toolbar.panels.logger.LoggingPanel',\n 'fusionbox.panels.user_panel.panels.UserPanel',\n)\n\n\nADMINS = (\n ('Aaron Merriam', 'aaronmerriam@gmail.com'),\n)\n\nLOGIN_URL = '/'\nLOGIN_REDIRECT_URL = '/'\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': 'sqlite_database', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n },\n}\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# On Unix systems, a value of None will cause Django to use the same\n# timezone as the operating system.\n# If running in a Windows environment this must be set to the same as your\n# system time zone.\nTIME_ZONE = 'America/Denver'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale\nUSE_L10N = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = os.path.join(PROJECT_PATH, '..', 'media')\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = os.path.join(PROJECT_PATH, '..', \"static\")\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n os.path.join(PROJECT_PATH, 'public'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n 'compressor.finders.CompressorFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'bink_5kk$ilzssne!j3(t4o**u^m^fgz!nxlvd+txic*#qi*ki'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\n# Debug Toolbar Settings\nINTERNAL_IPS = (\n '127.0.0.1',\n '63.228.88.83',\n '209.181.77.56',\n )\n\nROOT_URLCONF = 'gameoflife.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'gameoflife.wsgi.application'\n\nSTATES_YAML = 'projects/states.yaml'\n\nTEMPLATE_DIRS = (\n os.path.join(PROJECT_PATH, 'templates')\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.admindocs',\n 'django.contrib.humanize',\n 'django.contrib.gis',\n\n # Support Apps\n 'south',\n 'django_extensions',\n 'fusionbox',\n 'fusionbox.panels.user_panel',\n 'debug_toolbar',\n 'compressor',\n 'pure_pagination',\n\n # Site Apps\n 'gameoflife',\n 'world',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n)\n\nCOMPRESS_PRECOMPILERS = (\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nCOMPRESS_ENABLED = True\n\n# Import server specific settings 'settings_.py'\ntry:\n live_settings = __import__ ('settings_' + HOST_NAME)\nexcept ImportError:\n pass\nelse:\n try:\n attrlist = live_settings.__all__\n except AttributeError:\n attrlist = dir (live_settings)\n for attr in attrlist:\n if attr.startswith('__'):\n continue\n globals()[attr] = getattr (live_settings, attr)\n\nFORCE_SCRIPT_NAME = ''\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n\n# This import needs to happen after settings_local import due to how the cache loads\nfrom django.template.loader import add_to_builtins\nadd_to_builtins('cachebuster.templatetags.cachebuster')\n\nDATABASE_ENGINE = DATABASES['default']['ENGINE']\n","repo_name":"pipermerriam/django-game-of-life","sub_path":"gameoflife/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"755229344","text":"def main():\n print(\"hello calculator\")\n a = 2\n b = 4\n c = multiply(a, b)\n print(str(a) + \" * \" + str(b) + \" = \" + str(c))\n\n\ndef multiply(x, y):\n # do it wrong\n # return x / y\n # do it right\n return x * y\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"stewart-lab/calculator_rs","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"21689888177","text":"try:\n from fbchat import log, Client\n from fbchat.models import *\nexcept ModuleNotFoundError:\n print('Required modules not found.')\n exit()\n\n\nclass SendBot(Client):\n \"\"\"\n Over write the onMessage function to reply to a message instead of just logging it in.\n \"\"\"\n\n def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):\n self.markAsDelivered(thread_id, message_object.uid)\n self.markAsRead(thread_id)\n\n log.info(f\"{message_object} from {thread_id} in {thread_type.name}\")\n\n user = self.fetchUserInfo(thread_id)\n\n reply = f\"Hello {user[thread_id].name} 👻\\nI am currently not available on facebook. 
Call if it's something important.\\nThank You 💙\"\n if author_id != self.uid:\n self.send(Message(text=reply), thread_id=thread_id,\n thread_type=ThreadType.USER)\n","repo_name":"sulavmhrzn/facebook-autoreply-bot","sub_path":"utils/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"2817542200","text":"import openpyxl\nimport datetime\nimport DailyTask\n\ndef wdts(num):\n if num == 0:\n return \"Monday\"\n elif num == 1:\n return \"Tuesday\"\n elif num == 2:\n return \"Wednesday\"\n elif num == 3:\n return \"Thursday\"\n elif num == 4:\n return \"Friday\"\n elif num == 5:\n return \"Saturday\"\n elif num == 6:\n return \"Sunday\"\n\ndef stwd(weekday):\n if weekday == \"Monday\":\n return 0\n elif weekday == \"Tuesday\":\n return 1\n elif weekday == \"Wednesday\":\n return 2\n elif weekday == \"Thursday\":\n return 3\n elif weekday == \"Friday\":\n return 4\n elif weekday == \"Saturday\":\n return 5\n elif weekday == \"Sunday\":\n return 6\n\ndef initFile(hwfilename, assignmentSheetName, projectSheetName, dailyTaskSheetName, dailyTaskDataSheetName):\n wb = openpyxl.Workbook()\n wb.active.title = assignmentSheetName\n wb.create_sheet().title = projectSheetName\n wb.create_sheet().title = dailyTaskSheetName\n wb.create_sheet().title = dailyTaskDataSheetName\n wb.active['A1'] = \"Title\"\n wb.active['B1'] = \"Category\"\n wb.active['C1'] = \"Deadline\"\n wb.active['D1'] = \"Location\"\n wb.active['E1'] = \"Notes\"\n wb.active = wb[projectSheetName]\n wb.active['A1'] = \"Title\"\n wb.active['B1'] = \"Deadline\"\n wb.active['C1'] = \"Notes\"\n wb.active = wb[dailyTaskSheetName]\n wb.active['A1'] = \"Title\"\n wb.active['B1'] = \"Pattern\"\n wb.active['C1'] = \"Notes\"\n wb.active = wb[dailyTaskSheetName]\n wb.active['A1'] = \"Title\"\n wb.active['B1'] = \"Begin Data\"\n wb.save(filename=hwfilename)\n\ndef printHelp():\n print(\"-a: add assignment\")\n print(\"-f: remove assignment\")\n print(\"-p: add project\")\n print(\"-i: remove project\")\n print(\"-g: add daily task\")\n print(\"-w: mark off daily task as successful\")\n print(\"-d: remove daily task\")\n print(\"-h: print this help menu\")\n print(\"-v: print your to do list, including daily tasks that you don't need to do today.\")\n print(\"No command line arguments: print your to-do list\")\n print(\"Don't use this unless your computer's clock is correct.\")\n print(\" On Linux/BSD/MacOS, see the date command.\")\n print(\" On Windows, look at the \\\"Change Date and Time\\\" settings in \\\"System Settings\\\".\")\n print(\"To learn more: https://github.com/robert2343/Planner-Reinvented\")\n\ndef month2Int(month):\n if month == \"January\":\n return 1\n elif month == \"February\":\n return 2\n elif month == \"March\":\n return 3\n elif month == \"April\":\n return 4\n elif month == \"May\":\n return 5\n elif month == \"June\":\n return 6\n elif month == \"July\":\n return 7\n elif month == \"August\":\n return 8\n elif month == \"September\":\n return 9\n elif month == \"October\":\n return 10\n elif month == \"November\":\n return 11\n elif month == \"December\":\n return 12\n else:\n return -1\n\ndef sortDateLambda(date):\n if date == None:\n return datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)\n else:\n return date\n\ndef findIndexbyFirstCell(file, cellVal, sheetName):\n wb = openpyxl.load_workbook(filename=file)\n sheet = wb[sheetName]\n iterRows = sheet.iter_rows()\n listRows = list(iterRows)\n for 
i in range(1, len(listRows)):\n if listRows[i][0].value == cellVal:\n return i\n\ndef deleteItemByName(file, sheetName, delUp):\n wb = openpyxl.load_workbook(filename=file)\n sheet = wb[sheetName]\n name = input(\"Enter the name of the assignment to remove: \")\n idx = findIndexbyFirstCell(file, name, sheetName)\n sheet.delete_rows(idx + 1 - delUp, 1 + delUp)\n wb.save(file)\n return name\n\ndef deleteItemByNameNoInp(file, sheetName, name, delUp):\n wb = openpyxl.load_workbook(filename=file)\n sheet = wb[sheetName]\n idx = findIndexbyFirstCell(file, name, sheetName)\n sheet.delete_rows(idx + 1 - delUp, 1 + delUp)\n wb.save(file)\n return idx\n\n#decompose and numToLetter taken from https://codereview.stackexchange.com/questions/182733/base-26-letters-and-base-10-using-recursion\ndef decompose(num):\n while num:\n num, remainder = divmod(num - 1, 26)\n yield remainder\n\ndef numToLetter(num):\n return ''.join(chr(ord('A') + part)for part in decompose(num))[::-1]\n\ndef writeCell(x, y, val, file, sheetName):\n wb = openpyxl.load_workbook(filename=file)\n sheet = wb[sheetName]\n sheet[numToLetter(x) + str(y)].value = val\n wb.save(file)\n\ndef updateDaily(val, name, file, sheetName, dataSheetName, refDate, suppressOutput):\n idx = findIndexbyFirstCell(file, name, sheetName)\n dt = DailyTask.DailyTask.readInFromFile(idx + 1, file, sheetName, dataSheetName)\n if len(dt.boolArr) >= len(dt.datesArr):\n print(\"There is a serious problem. If you didn't manually modify the Excel file, report a bug.\")\n else:\n if dt.datesArr[-1].replace(hour=0, minute=0, second=0, microsecond=0) == refDate.replace(hour=0, minute=0, second=0, microsecond=0):\n placeAtX = len(dt.boolArr) + 2\n placeAtY = findIndexbyFirstCell(file, name, dataSheetName) + 1\n writeCell(placeAtX, placeAtY, val, file, dataSheetName)\n writeCell(placeAtX + 1, placeAtY - 1, dt.calculateNextDate(refDate + datetime.timedelta(days=1)), file, dataSheetName)\n else:\n if not suppressOutput:\n print(\"This cannot be checked off today\")\n","repo_name":"robert2343/Planner-Reinvented","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"71873469581","text":"import torch\nfrom torchvision import transforms, datasets\nfrom torchvision.transforms import v2\nfrom matplotlib import pyplot\n\nmean = torch.Tensor([0.4902, 0.4732, 0.4374])\nstd = torch.Tensor([0.1834, 0.1803, 0.1797])\n\ntransform = v2.Compose([\n v2.ToImage(),\n v2.ToDtype(torch.float32, scale=True),\n v2.RandomRotation(degrees=30),\n v2.RandomPerspective(),\n v2.RandomResizedCrop(size=(32, 32), antialias=True),\n # v2.Normalize(mean=mean, std=std)\n])\n\ndata = datasets.CIFAR10('CIFAR10', download=True, transform=transform)\n\n# mean = torch.Tensor([0, 0, 0])\n# std = torch.Tensor([0, 0, 0])\n# for image, _ in data:\n# for i in range(3):\n# std_, mean_ = torch.std_mean(image[i])\n# mean[i] += mean_\n# std[i] += std_\n# mean /= len(data)\n# std /= len(data)\n#\n# print(f'mean={mean}, std={std}')\n\nto_show = 5\nfor image, label in data:\n image: torch.Tensor = image\n image = torch.transpose(image, 0, 1)\n image = torch.transpose(image, 1, 2)\n\n pyplot.imshow(image)\n pyplot.show()\n\n to_show -= 1\n if to_show <= 0:\n 
break\n","repo_name":"PJutch/AiBlob","sub_path":"AIOlymp/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"2270775408","text":"import os\n\nos.chdir(r\"C:\\Users\\user\\Desktop\")\n\nwith open(\"rosalind_rstr.txt\") as f:\n n, prob, seq = f.read().split()\n\nn= int(n)\nprob = float(prob)\n\ndic = {}\n\ndic[\"A\"] = (1-prob)/2\ndic[\"C\"] = prob/2\ndic[\"G\"] = prob/2\ndic[\"T\"] = (1-prob)/2\n\nanswer = 1\n\nfor i in seq:\n answer *= dic[i]\n\nprint(round(1- (1-answer)**(n-len(seq)+1), 3))\n","repo_name":"bloodmage1/Rosalind","sub_path":"solution/35 Matching Random Motifs/35. Matching Random Motifs.py","file_name":"35. Matching Random Motifs.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"20031075466","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport struct\r\nfrom bmpRead import ReadBMPFile\r\nfrom SVD import svdCompression\r\nfrom HFM import *\r\n\r\ndef decomposite(inputbmp):\r\n\t# eta = input(\"您希望的图片最小压缩率:\")\r\n\tk = round(inputbmp.biHeight * inputbmp.biWidth / 4 / (1 + inputbmp.biHeight + inputbmp.biWidth))\r\n\tfor eta in range(100, 0, -10):\r\n\t\tur, sigmar, vr, kr = svdCompression(inputbmp.R, eta/100)\r\n\t\tug, sigmag, vg, kg = svdCompression(inputbmp.G, eta/100)\r\n\t\tub, sigmab, vb, kb = svdCompression(inputbmp.B, eta/100)\r\n\t\tif(max(kr, kb, kg) < k):\r\n\t\t\tprint('eta:',eta)\r\n\t\t\tprint(k*eta,kr,kb,kg)\r\n\t\t\tbreak\r\n\tbuff = []\r\n\tbuff_extend(inputbmp.bfType, buff)\r\n\tbuff_extend(inputbmp.bfSize.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.bfOffBits.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biSize.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biWidth.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biHeight.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biPlanes.to_bytes(2, 'little'), buff)\r\n\tbuff_extend(inputbmp.biBitCount.to_bytes(2, 'little'), buff)\r\n\tbuff_extend(inputbmp.biCompression.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biSizeImage.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biXPelsPerMeter.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biYPelsPerMeter.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biClrUsed.to_bytes(4, 'little'), buff)\r\n\tbuff_extend(inputbmp.biClrImportant.to_bytes(4, 'little'), buff)\r\n\r\n\tbuff_extend(kr.to_bytes(4, 'big'), buff)\r\n\tfor row in ur:\r\n\t\tbuff_extend(struct.pack('>%sf' % len(row), *row), buff)\r\n\tbuff_extend(struct.pack('>%sf' % len(sigmar), *sigmar), buff)\r\n\tfor row in vr:\r\n\t\tbuff_extend(struct.pack('>%sf' % len(row), *row), buff)\r\n\tbuff_extend(kg.to_bytes(4, 'big'), buff)\r\n\tfor row in ug:\r\n\t\tbuff_extend(struct.pack('>%sf' % len(row), *row), buff)\r\n\tbuff_extend(struct.pack('>%sf' % len(sigmag), *sigmag), buff)\r\n\tfor row in vg:\r\n\t\tbuff_extend(struct.pack('>%sf' % len(row), *row), buff)\r\n\tbuff_extend(kb.to_bytes(4, 'big'), buff)\r\n\tfor row in ub:\r\n\t\tbuff_extend(struct.pack('>%sf' % len(row), *row), buff)\r\n\tbuff_extend(struct.pack('>%sf' % len(sigmab), *sigmab), buff)\r\n\tfor row in vb:\r\n\t\tbuff_extend(struct.pack('>%sf' % len(row), *row), buff)\r\n\treturn buff\r\n\r\ndef buff_extend(bistr, buff):\r\n\tfor x in bistr:\r\n\t\tbuff.append(x.to_bytes(1,byteorder = 
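		# The leftover bits were left-shifted above so they sit MSB-aligned in
		# the final byte, and the sentinel bit that tracked the fill level was
		# masked off before this final write.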
'big'))\r\n\r\ndef encodestr(buff, inputfile):\r\n\t#数据初始化\r\n\tnode_dict = {}\t#建立原始数据与编码节点的映射,便于稍后输出数据的编码\r\n\tcount_dict = {}\r\n\tnodes = []\t#结点列表,用于构建哈夫曼树\r\n\tprint(\"Start encode...\")\r\n\tprint(\"size of string:\",len(buff),\"B\")\r\n\t#计算字符频率,并将单个字符构建成单一节点\r\n\tfor x in buff:\r\n\t\tif count_dict.get(x) is None:\r\n\t\t\tcount_dict[x] = 0\r\n\t\tcount_dict[x] += 1\r\n\tprint(\"Read finish\")\r\n\t# print(count_dict)\t#输出权值字典,可注释掉\r\n\tfor x in count_dict.keys():\r\n\t\tnode_dict[x] = node(value = count_dict[x])\r\n\t\tnodes.append(node_dict[x])\r\n\tbuild_tree(nodes)\t#哈夫曼树构建\r\n\tenco_dict = dict_code(node_dict)\t#构建编码表\r\n\tprint(\"Encode finish\")\r\n\r\n\t#对所有根节点进行排序\r\n\thead = sorted(count_dict.items(),key = lambda x:x[1],reverse = True)\r\n\tprint(\"head:\",head[0][1])\t#动态调整编码表的字节长度,优化文件头大小\r\n\tif head[0][1] < 256:\r\n\t\tbit_width = 1\r\n\telif head[0][1] < 65536:\r\n\t\tbit_width = 2\r\n\telif head[0][1] < 16777216:\r\n\t\tbit_width = 3\r\n\telse:\r\n\t\tbit_width = 4\r\n\tprint(\"bit_width:\",bit_width)\r\n\to = open(inputfile.split('.')[0]+\".hfm\", 'wb')\r\n\t#写出原文件名\r\n\to.write((inputfile.split('/')[-1] + '\\n').encode(encoding='utf-8'))\r\n\to.write(len(enco_dict).to_bytes(2,byteorder = 'big'))\t#写出结点数量\r\n\to.write(bit_width.to_bytes(1,byteorder = 'big'))\t#写出编码表字节宽度\r\n\tfor x in enco_dict.keys():\t#编码文件头\r\n\t\to.write(x)\r\n\t\to.write(count_dict[x].to_bytes(bit_width,byteorder = 'big'))\r\n\r\n\traw = 0b1\t#数据写入相关\r\n\tlast = 0\t#记录压缩进度\r\n\tprint(\"Write head finish\")\r\n\tfor i in range(len(buff)):\t#开始压缩数据\r\n\t\tfor x in enco_dict[buff[i]]:\r\n\t\t\traw = raw << 1\r\n\t\t\tif x == 49:\r\n\t\t\t\traw = raw | 1\r\n\t\t\tif raw.bit_length() == 9:\r\n\t\t\t\traw = raw & (~(1 << 8))\r\n\t\t\t\to.write(raw.to_bytes(1,byteorder = 'big'))\r\n\t\t\t\t# o.flush()\r\n\t\t\t\traw = 0b1\r\n\t\ttem = round(i / len(buff) * 100)\r\n\t\tif tem > last:\r\n\t\t\tprint(\"\\rrate of encode:\",tem,\"%\", end=\"\")\t#输出压缩进度\r\n\t\t\tlast = tem\r\n\r\n\tif raw.bit_length() > 1:\t#处理文件尾部不足一个字节的数据\r\n\t\traw = raw << (8 - (raw.bit_length() - 1))\r\n\t\traw = raw & (~(1 << raw.bit_length() - 1))\r\n\t\to.write(raw.to_bytes(1, byteorder = 'big'))\r\n\to.close()\r\n\tprint(\"\\nFile encode success.\")\r\n\r\ndef bmp_compress():\r\n\twhile True:\r\n\t\t# inputpath = input(\"请输入bmp文件:\")\r\n\t\tinputpath = \"f3.bmp\"\r\n\t\tname = inputpath.split('.')\r\n\t\tif name[-1] == \"bmp\":\r\n\t\t\tinputbmp = ReadBMPFile(inputpath)\r\n\t\t\tbuff = decomposite(inputbmp)\r\n\t\t\tencodestr(buff, inputpath)\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(inputpath, \"is not a BMP file!\\n\")\r\n\r\ndef decodestring(inputfile):\r\n\r\n\t#数据初始化\r\n\tnode_dict = {}\t#建立原始数据与编码节点的映射,便于稍后输出数据的编码\r\n\tcount_dict = {}\r\n\tnodes = []\t#结点列表,用于构建哈夫曼树\r\n\r\n\tprint(\"Start decode...\")\r\n\tf = open(inputfile,'rb')\r\n\tf.seek(0,2)\r\n\teof = f.tell()\r\n\tf.seek(0)\r\n\r\n\toutputfile = inputfile.replace(inputfile.split('/')[-1], \\\r\n\t\tf.readline().decode(encoding='utf-8').replace('\\n',''))\r\n\t\r\n\tcount = int.from_bytes(f.read(2), byteorder = 'big')\t#取出结点数量\r\n\tbit_width = int.from_bytes(f.read(1), byteorder = 'big')\t#取出编码表字宽\r\n\tfor i in range(count):\t#解析文件头\r\n\t\tkey = f.read(1)\r\n\t\tcount_dict[key] = int.from_bytes(f.read(bit_width), byteorder = 'big')\r\n\tfor x in count_dict.keys():\r\n\t\tnode_dict[x] = node(value = count_dict[x])\r\n\t\tnodes.append(node_dict[x])\r\n\tbuild_tree(nodes)\t#重建哈夫曼树\r\n\tenco_dict = dict_code(node_dict)\t#建立编码表\r\n\tinverse_dict = {val: key for key, val 
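	# Invert the byte -> bit-string code table so accumulated bit patterns
	# can be looked up directly while decoding.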
in enco_dict.items()}\t#反向字典构建\r\n\ti = f.tell()\r\n\tprint(\"huffmam file:\",eof,\"B\")\r\n\tlast = 0\r\n\tdata = b''\r\n\tbuff = bytearray()\r\n\r\n\twhile i < eof:\t#开始解压数据\r\n\t\traw = int.from_bytes(f.read(1), byteorder = 'big')\r\n\t\ti += 1\r\n\t\tfor j in range(8,0,-1):\r\n\t\t\tif (raw >> (j - 1)) & 1 == 1:\r\n\t\t\t\tdata += b'1'\r\n\t\t\t\traw = raw & (~(1 << (j - 1)))\r\n\t\t\telse:\r\n\t\t\t\tdata += b'0'\r\n\t\t\t\traw = raw & (~(1 << (j - 1)))\r\n\t\t\tif inverse_dict.get(data) is not None:\r\n\t\t\t\tbuff.extend(inverse_dict[data])\r\n\t\t\t\t# print(\"decode\",data,\":\",inverse_dict[data])\r\n\t\t\t\tdata = b''\r\n\t\ttem = round(i / eof * 100)\r\n\t\tif tem > last:\t\t\t\t\t\t\t\r\n\t\t\tprint(\"\\rrate of decode:\",tem,\"%\", end = '')\t#输出解压进度\r\n\t\t\tlast = tem\r\n\tf.close()\r\n\tprint(\"\\nFile decode success.\")\r\n\r\n\timgdecompress(buff, outputfile)\r\n\r\ndef buff_pop(num, buff):\r\n\ts = buff[:num]\r\n\tdel buff[:num]\r\n\treturn s\r\n\r\ndef imgdecompress(buff, outputfile):\r\n\r\n\to = open(outputfile,'wb')\r\n\tfor i in range(6):\r\n\t\to.write(buff.pop(0).to_bytes(1,byteorder = 'little'))\r\n\to.write(b'\\x00\\x00\\x00\\x00')\r\n\tfor i in range(8):\r\n\t\to.write(buff.pop(0).to_bytes(1,byteorder = 'little'))\r\n\tWidth = int.from_bytes(buff[:4], byteorder='little')\r\n\tHeight = int.from_bytes(buff[4:8], byteorder='little')\r\n\tfor i in range(36):\r\n\t\to.write(buff.pop(0).to_bytes(1,byteorder = 'little'))\r\n\r\n\tkr = int.from_bytes(buff_pop(4,buff),byteorder = 'big')\r\n\tur = np.zeros((Height,kr))\r\n\tfor i in range(Height):\r\n\t\tfor j in range(kr):\r\n\t\t\tur[i, j] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tsigmar = np.zeros(kr)\r\n\tfor i in range(kr):\r\n\t\tsigmar[i] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tvr = np.zeros((kr,Width))\r\n\tfor i in range(kr):\r\n\t\tfor j in range(Width):\r\n\t\t\tvr[i, j] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tR = np.rint(ur.dot(np.diag(sigmar)).dot(vr).clip(0,255)).astype('uint8')\r\n\r\n\tkg = int.from_bytes(buff_pop(4,buff),byteorder = 'big')\r\n\tug = np.zeros((Height,kg))\r\n\tfor i in range(Height):\r\n\t\tfor j in range(kg):\r\n\t\t\tug[i, j] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tsigmag = np.zeros(kg)\r\n\tfor i in range(kg):\r\n\t\tsigmag[i] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tvg = np.zeros((kg,Width))\r\n\tfor i in range(kg):\r\n\t\tfor j in range(Width):\r\n\t\t\tvg[i, j] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tG = np.rint(ug.dot(np.diag(sigmag)).dot(vg).clip(0,255)).astype('uint8')\r\n\r\n\tkb = int.from_bytes(buff_pop(4,buff),byteorder = 'big')\r\n\tub = np.zeros((Height,kb))\r\n\tfor i in range(Height):\r\n\t\tfor j in range(kb):\r\n\t\t\tub[i, j] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tsigmab = np.zeros(kb)\r\n\tfor i in range(kb):\r\n\t\tsigmab[i] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tvb = np.zeros((kb,Width))\r\n\tfor i in range(kb):\r\n\t\tfor j in range(Width):\r\n\t\t\tvb[i, j] = struct.unpack('>f', buff_pop(4, buff))[0]\r\n\tB = np.rint(ub.dot(np.diag(sigmab)).dot(vb).clip(0,255)).astype('uint8')\r\n\r\n\tI = np.flipud(np.stack((B, G, R), axis = 2))\r\n\tfor row in I:\r\n\t\tcount = 0\r\n\t\tfor column in row:\r\n\t\t\to.write(struct.pack('<%sB' % len(column), *column))\r\n\t\t\tcount += 3\r\n\t\twhile count % 4 != 0:\r\n\t\t\to.write(b'\\x00')\r\n\t\t\tcount += 1\r\n\to.close()\r\n\r\ndef decompress():\r\n\t# inputpath = input(\"请要解压的文件:\")\r\n\tinputpath = \"c7.hfm\"\r\n\tdecodestring(inputpath)\r\n\r\nif 
__name__ == \"__main__\":\r\n\t# if input(\"1: compress image\\t2: decompress image\\nEnter the operation to perform: \") == '1':\r\n\t\tbmp_compress()\r\n\t# else:\r\n\t\t# decompress()\r\n","repo_name":"JesseMcRae/bmp_compress","sub_path":"Compression.py","file_name":"Compression.py","file_ext":"py","file_size_in_byte":9403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"1988032197","text":"from urllib import response\nimport requests\nfrom bs4 import BeautifulSoup\nfrom openpyxl import Workbook\nimport re\nimport traceback\n\nfrom soupsieve import select\n\n\n\n # create the workbook\nwb = Workbook(write_only=True)\nws = wb.create_sheet()\nws.append(['사건구분', '키워드/결과', '승소 요지/사건 개요', '변호사'])\n\nfor case_num in range(7, 420): # crawl uids 7 through 419\n    try:\n        url = 'https://www.minwho.kr/case/major_case.html?bmain=view&uid={}'.format(case_num)\n        response = requests.get(url)\n        rating_page = response.text\n        soup = BeautifulSoup(rating_page, 'html.parser') \n        test_keyword = str(soup.select_one('p.tit'))\n        # test = soup.select_one('div.board-box').get_text().strip()\n        # print(test)\n        # test_1 = re.sub(' +', ' ', re.sub('[\\n|\\xa0]', ' ', test)).strip() # re.sub(' +') - collapses runs of two or more spaces\n        # print(test_1)\n        # test = soup.select_one('p.bar_span').get_text().strip()\n        # test_1 = test.splitlines() # split on newline characters\n        # print(test_1[1])\n        # print(test_keyword)\n        \n        if '승소' in test_keyword: # only crawl entries whose title contains '승소' (won case)\n            case_tag = soup.select_one('p.bar_span').get_text().strip()\n            case_tags = case_tag.splitlines() # split on newline characters\n            case_division = case_tags[0].replace('관련 업무분야 : ', '')\n            case_keyword = soup.select_one('p.tit').get_text()\n            case_victorypoint_overview_test = soup.select_one('div.board-box').get_text().strip()\n            case_victorypoint_overview = re.sub(' +', ' ', re.sub('[\\n|\\xa0]', ' ', case_victorypoint_overview_test)).strip() # strip all extra whitespace\n            case_lawyer = case_tags[1]\n            row = [case_division, case_keyword, case_victorypoint_overview, case_lawyer]\n            ws.append(row)\n\n    except:\n        print('.')\n\n\nwb.save('록션_데이터수집_민후_2.xlsx')\n\n# case_division: case category\n# case_keyword: keyword\n# case_victorypoint_overview: winning points / case overview\n# case_lawyer: lawyer in charge","repo_name":"kipple99/OfficeAutomation","sub_path":"beautifulsoup_law_data_minwho_2.py","file_name":"beautifulsoup_law_data_minwho_2.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"4512712947","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom .helpers import *\nfrom .generators import task_report_full, attendance_report_full\nfrom apps.task.helpers import get_task_list\nfrom apps.user.management.commands.generate_attendance_report import get_work_hours\nfrom apps.task.config import TASK_STATUS_DICT as TSD\n\n\nclass TaskCombined(APIView):\n    permission_classes = (IsAuthenticated,)\n\n    def get(self, request, format=None):\n        \"\"\"\n        Parameter details:\n        ---\n        manager:\n            type: int\n            required: Yes\n\n        date_range:\n            type: int\n            default: 1\n            options:\n                (1, last day),\n                (2, this week),\n                (3, last week),\n                (4, this month),\n                (5, last month),\n\n        date:\n            type: iso date\n            required: No\n\n        Sample Response:\n        ---\n        {\n            'complete': 10,\n            'delayed': 13,\n            'cancelled': 14,\n            'postponed': 14,\n        }\n\n        \"\"\"\n        task_qs = get_task_qs(request)\n        complete = task_qs.filter(status=TSD['Complete']).count()\n        cancelled = task_qs.filter(status=TSD['Cancelled']).count()\n        delayed = 
task_qs.filter(delayed=True).count()\n postponed = task_qs.filter(status=TSD['Postponed']).count()\n\n data = {\n 'complete': complete,\n 'delayed': delayed,\n 'cancelled': cancelled,\n 'postponed': postponed\n }\n return Response(data, status=200)\n\n\nclass TaskFilteredList(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"\n Parameter details:\n ---\n manager:\n type: int\n required: Yes\n\n agent:\n type: int\n required: No\n\n date_range:\n type: int\n default: 1\n options:\n (1, last day),\n (2, this week),\n (3, last week),\n (4, this month),\n (5, last month),\n\n date:\n type: iso date\n required: No\n\n status:\n type: int\n required: No\n choices:\n (3, 'Complete'),\n (4, 'Cancelled'),\n (5, 'Postponed'),\n\n delayed:\n type: bool\n required: no\n\n Sample Response:\n ---\n [\n {\n 'title': 'Task_test1',\n 'status': 1,\n 'deadline': datetime,\n 'task_type': 'Visit1',\n 'address': '',\n 'agent_list': [50, 51]\n },...\n ]\n\n\n \"\"\"\n task_qs = get_task_qs(request)\n data = get_task_list(task_qs)\n return Response(data, status=200)\n\n\nclass AttendanceCombined(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"\n Parameter details:\n ---\n manager:\n type: int\n required: Yes\n\n date:\n type: date str\n default: Yes\n\n Sample Response:\n ---\n {\n 'present': 14,\n 'absent': 2,\n 'low_work_hour': 6,\n 'low_task_hour': 23\n\n }\n\n \"\"\"\n\n atc_qs, manager_qf, field = get_atc_qs(request)\n\n present = 0\n absent = 0\n low_work_hour = 0\n low_task_hour = 0\n\n for atc_obj in atc_qs:\n present += atc_obj.present.filter(manager_qf).count()\n absent += atc_obj.absent.filter(manager_qf).count()\n low_work_hour += atc_obj.low_work_hour.filter(manager_qf).count()\n low_task_hour += atc_obj.low_task_hour.filter(manager_qf).count()\n\n data = {\n 'present': present,\n 'absent': absent,\n 'low_work_hour': low_work_hour,\n 'low_task_hour': low_task_hour\n }\n\n return Response(data, status=200)\n\n\nclass AttendanceFiltered(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"\n Parameter details:\n ---\n manager:\n type: int\n required: Yes\n\n date:\n type: date str\n required: Yes\n\n field:\n type: str\n choices: 'present', 'absent', 'low_work_hour', 'low_task_hour', 'late_arrival'\n\n Sample Response:\n ---\n\n [\n {\n 'id': 222,\n 'username': 'username1',\n 'full_name': 'full_name',\n 'image': 'image_url..',\n 'designation': 'designation',\n 'phone': 'phone',\n 'manager_name': 'manager_name',\n 'manager_id': 23,\n 'manager_image': 'image_url..',\n 'manager_designation': 'designation',\n 'role': 1,\n },..\n ]\n\n \"\"\"\n\n atc_qs, manager_qf, field = get_atc_qs(request)\n data = []\n\n for atc_obj in atc_qs:\n users = []\n if field == 'present':\n users = atc_obj.present.filter(manager_qf)\n if field == 'absent':\n users = atc_obj.absent.filter(manager_qf)\n if field == 'low_work_hour':\n users = atc_obj.low_work_hour.filter(manager_qf)\n if field == 'low_task_hour':\n users = atc_obj.low_task_hour.filter(manager_qf)\n if field == 'late_arrival':\n users = atc_obj.late_arrival.filter(manager_qf)\n\n att_list = get_attendance_list(users, atc_obj.date)\n for att in att_list:\n data.append(att)\n\n return Response(data, status=200)\n\n\nclass AttendanceIndividual(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"\n Parameter details:\n ---\n\n agent:\n type: int\n required: Yes\n\n 
date:\n type: date str\n required: Yes\n\n date_range:\n type: int\n Choices: (1, last day), (2, last week), (3, last month)\n Default: 1\n\n Sample Response:\n ---\n {\n 'present': 14,\n 'absent': 2,\n 'low_work_hour': 6,\n 'low_task_hour': 23\n\n }\n \"\"\"\n org = request.user.org\n ati_qs = get_ati_qs(request)\n\n min_work_hour = get_work_hours(org) * (org.min_work_hour_p / 100)\n present_qs = ati_qs.filter(Q(status=ASD['Present']))\n\n present = present_qs.count()\n absent = ati_qs.filter(Q(status=ASD['Absent'])).count()\n low_work_hour = present_qs.filter(Q(work_hour__lt=min_work_hour)).count()\n low_task_hour = present_qs.filter(Q(task_hour_p__lt=org.min_task_hour_p)).count()\n\n data = {\n 'present': present,\n 'absent': absent,\n 'low_work_hour': low_work_hour,\n 'low_task_hour': low_task_hour\n }\n\n return Response(data, status=200)\n\n\nclass Rankings(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"\n Parameter details:\n ---\n topic:\n type: str\n required: Yes\n choices:\n 'absence',\n 'low_task_hour',\n 'low_work_hour',\n 'overtime',\n 'late_arrival',\n 'delayed_task',\n 'travel_distance'\n\n manager:\n type: int\n required: Yes\n\n date_range:\n type: int\n default: 1\n options:\n (2, this week),\n (3, last week),\n (4, this month),\n (5, last month),\n\n\n Sample Response:\n ---\n [\n {\n 'topic': ,\n 'id': 12,\n 'agent_name': 'agent1',\n 'image': 'url1',\n : 10, // topic comes from the choice list\n },...\n ]\n\n\n \"\"\"\n top_five, topic = get_ranking_qs(request)\n data = get_ranking_details(top_five, topic)\n\n return Response(data, status=200)\n\n\nclass AttendanceExport(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"\n Parameter details:\n ---\n agent:\n type: int\n default: False\n\n manager:\n type: int\n default: False\n\n date_range:\n type: int\n default: 1\n options: (1, last day), (2, last week), (3, last month)\n\n date:\n type: date str\n default: false\n\n Sample Response:\n ---\n [\n {\n 'date': date_string,\n 'agent': 'full_name',\n 'team': 'team_name',\n 'status': 'Present',\n 'entry_time': timestamp,\n 'exit_time': timestamp,\n 'work_hour': '02:30:49'\n },\n\n {\n 'date': date_string,\n 'agent': 'full_name',\n 'team': 'team_name',\n 'status': 'Present',\n 'entry_time': timestamp,\n 'exit_time': timestamp,\n 'work_hour': '02:30:49'\n },....\n\n ]\n\n \"\"\"\n date_wise_counts = attendance_report_full(request)\n\n return Response(date_wise_counts, status=200)\n\n\nclass TaskExport(APIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"\n Parameter details:\n ---\n agent:\n type: int\n default: False\n date_range:\n type: int\n default: 1\n options: (1, last day), (2, last week), (3, last month)\n\n date:\n type: date str\n default: false\n\n Sample Response:\n ---\n [\n {\n 'deadline': timestamp,\n 'title': 'task.title',\n 'agent': 'full_name',\n 'expected_duration': '02:30:49',\n 'actual_duration': 02:30:49',\n 'address': 'task address',\n 'status': 'task status',\n 'delayed': 'Yes'/'No'\n },\n\n {\n 'deadline': timestamp,\n 'title': 'task.title',\n 'agent': 'full_name',\n 'expected_duration': '02:30:49',\n 'actual_duration': 02:30:49',\n 'address': 'task address',\n 'status': 'task status',\n 'delayed': 'Yes'/'No'\n },....\n\n ]\n\n \"\"\"\n data = task_report_full(request)\n return Response(data, 
status=200)\n","repo_name":"ash018/FFTracker","sub_path":"apps/report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"17690694681","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nplt.style.use('dark_background')\r\n# x range between 0~10, there is 100 elements in between\r\nx = np.linspace(0,10,100)\r\nfig = plt.figure()\r\n\r\n#Different graph range or starting point\r\nplt.plot(x, np.sin(x))\r\n\r\nplt.xlim(10, 0)\r\nplt.ylim(1.5,-1.5)\r\n#or\r\n#plt.axis([-1,11,-1.5,1.5])\r\n\r\n#Save as .png file\r\nfig.savefig('my_figure8.png')\r\n\r\nplt.show()\r\n","repo_name":"TaeYounKwon/Programming-Language-Dev","sub_path":"Python_Matplotlib Graph Practice/Matplotlib_Basic_Practice8_limitRange.py","file_name":"Matplotlib_Basic_Practice8_limitRange.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"10302230398","text":"import logging\nimport time\n\n\nclass PrintingCounter:\n def __init__(self, name, module=1000):\n self._counter = 0\n self._name = name\n self._module = module\n self._start_time = time.time()\n self._last_time = self._start_time\n\n def increase(self):\n self._counter = self._counter + 1\n if self._counter % self._module == 0:\n now = time.time()\n secs_since_last_time = round(now - self._last_time, 4)\n ratio = round(self._module/secs_since_last_time, 1)\n logging.debug(f\"{self._name}: {self._counter}. Seconds between {self._module} increase: {secs_since_last_time}s \"\n + f\"Total seconds: {round(now - self._start_time, 4)}s. Ratio: {ratio}inc/s\")\n self._last_time = now\n\n def print_final(self):\n now = time.time()\n logging.debug(f\"{self._name}: {self._counter}. 
Total seconds: {round(now - self._start_time, 4)}s\")\n","repo_name":"andrezszambrano/SistDistTp1","sub_path":"server/server_common/printing_counter.py","file_name":"printing_counter.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"73195523338","text":"import imp\nfrom requests import request\nfrom rest_framework import generics, status, permissions\nfrom ...models import Product, Category\nfrom ...serializers import ProductSerializer, ProductCreateSerializer\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\nfrom datetime import datetime\nfrom rest_framework import viewsets\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\nfrom rest_framework.views import APIView\n\nclass ProductView():\n\n    product_create_response = openapi.Response('response description', ProductSerializer)\n    @swagger_auto_schema(method='GET', responses= {200: product_create_response})\n    @api_view(['GET'])\n    @permission_classes([permissions.IsAuthenticated])\n    def get_products(request):\n        queryset = Product.objects.all()\n        serializer = ProductSerializer(queryset, many=True)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n    @swagger_auto_schema(method='POST', request_body=ProductCreateSerializer, responses= {200: product_create_response})\n    @api_view(['POST'])\n    @permission_classes([permissions.IsAuthenticated, permissions.IsAdminUser])\n    def create_product(request):\n        data = {\n            'category': request.data.get('category_id'), \n            'name': request.data.get('name'), \n            'title': request.data.get('title'), \n            'price': request.data.get('price'),\n            'image': request.data.get('image'),\n            'score': request.data.get('score'),\n            'created_at' : datetime.now(),\n            'updated_at': datetime.now()\n        }\n        serializer = ProductSerializer(data=data)\n        serializer.is_valid(raise_exception=True)\n        serializer.save()\n        return Response(serializer.data, status=status.HTTP_201_CREATED)\n\nclass ProductDetail():\n    product_response = openapi.Response('response description', ProductSerializer)\n    @swagger_auto_schema(method='DELETE', responses= {200: 'Delete successfully'})\n    @api_view(['DELETE'])\n    @permission_classes([permissions.IsAuthenticated, permissions.IsAdminUser])\n    def delete_product(request, id):\n        product = Product.objects.get(id=id)\n        product.delete()\n        return Response(status=status.HTTP_204_NO_CONTENT)\n\n    @swagger_auto_schema(method='GET', responses= {200: product_response})\n    @api_view(['GET'])\n    @permission_classes([permissions.IsAuthenticated])\n    def get_product(request, id):\n        product = Product.objects.get(id=id)\n        serializer = ProductSerializer(product)\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n    ","repo_name":"xuantrinh/django","sub_path":"sample/web_api/views/product/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"43880900520","text":"import logging\nimport logging.config\nimport sys\nimport threading\nfrom argparse import ArgumentParser\nfrom signal import SIGINT, SIGTERM, signal\nfrom typing import Callable\n\nfrom network_tracing.daemon.app import Application, ApplicationConfig\nfrom network_tracing.daemon.constants import DEFAULT_LOGGING_CONFIG\n\nlogging.config.dictConfig(DEFAULT_LOGGING_CONFIG)\n\nlogger = logging.getLogger(__name__)\n\n\ndef _create_parser() -> 
ArgumentParser:\n parser = ArgumentParser()\n parser.add_argument('-c',\n '--config',\n default='/etc/network-tracing/ntd-config.json',\n metavar='PATH',\n help='path to configuration file')\n return parser\n\n\ndef _parse_args():\n parser = _create_parser()\n args = parser.parse_args()\n return args\n\n\ndef _create_start_app(config_file_path: str) -> Application:\n logger.info('Loading config file %s', config_file_path)\n config = ApplicationConfig.load_file(config_file_path)\n\n app = Application(config)\n app.start()\n\n signal_handler = _create_signal_handler(app)\n signal(SIGINT, signal_handler)\n signal(SIGTERM, signal_handler)\n\n logger.info(\n 'Application started; press Ctrl + C or send SIGINT/SIGTERM to exit')\n\n return app\n\n\ndef _wait_forever():\n forever = threading.Event()\n forever.wait()\n\n\ndef _create_signal_handler(app: Application) -> Callable:\n ignore = lambda sig, stack: None\n\n def signal_handler(sig, stack):\n logger.info('Gracefully shutting down')\n signal(SIGINT, ignore)\n signal(SIGTERM, ignore)\n try:\n app.stop()\n sys.exit(0)\n except Exception:\n logger.error('Graceful shutdown failed', exc_info=True)\n sys.exit(1)\n\n return signal_handler\n\n\ndef main():\n args = _parse_args()\n _create_start_app(args.config)\n _wait_forever()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"YangHanlin/network-tracing","sub_path":"network_tracing/daemon/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28761370854","text":"def simulate(self, n=1, t=None):\n \"\"\"Simulates the network forward.\n\n Simulates either a specific number of events or for a specified\n amount of simulation time.\n\n Parameters\n ----------\n n : int (optional, default: 1)\n The number of events to simulate. If ``t`` is not given\n then this parameter is used.\n t : float (optional)\n The amount of simulation time to simulate forward. If\n given, ``t`` is used instead of ``n``.\n\n Raises\n ------\n QueueingToolError\n Will raise a :exc:`.QueueingToolError` if the\n ``QueueNetwork`` has not been initialized. Call\n :meth:`.initialize` before calling this method.\n\n Examples\n --------\n Let ``net`` denote your instance of a ``QueueNetwork``. Before\n you simulate, you need to initialize the network, which allows\n arrivals from outside the network. To initialize with 2 (random\n chosen) edges accepting arrivals run:\n\n >>> import queueing_tool as qt\n >>> g = qt.generate_pagerank_graph(100, seed=50)\n >>> net = qt.QueueNetwork(g, seed=50)\n >>> net.initialize(2)\n\n To simulate the network 50000 events run:\n\n >>> net.num_events\n 0\n >>> net.simulate(50000)\n >>> net.num_events\n 50000\n\n To simulate the network for at least 75 simulation time units\n run:\n\n >>> t0 = net.current_time\n >>> net.simulate(t=75)\n >>> t1 = net.current_time\n >>> t1 - t0 # doctest: +ELLIPSIS\n 75...\n \"\"\"\n if not self._initialized:\n msg = (\"Network has not been initialized. 
\"\n               \"Call '.initialize()' first.\")\n        raise QueueingToolError(msg)\n    if t is None:\n        for dummy in range(n):\n            self._simulate_next_event(slow=False)\n    else:\n        now = self._t\n        while self._t < now + t:\n            self._simulate_next_event(slow=False)","repo_name":"MichaelFu1998-create/security_scanning","sub_path":"codesearchnet/codesearchnet_8910.py","file_name":"codesearchnet_8910.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"70071216139","text":"import time\nimport pytest\nfrom pages.test_Launchpage import Launchpage\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\n@pytest.mark.usefixtures(\"setup\")\nclass Testframework():\n    def test_framework(self):\n        lp=Launchpage(self.driver,self.wait)\n        lp.departloc(\"New Delhi\")\n        lp.goingto(\"Ban\")\n        lp.searchResults(\"Bangalore (BLR)\")\n        lp.selectdate(\"09/08/2022\")\n        lp.clicksearch()\n        lp.scroldriver()\n\n        # NOTE: the searchflight page object still needs to be imported from its page module\n        sf=searchflight(self.driver)\n        sf.resultflight()\n        all_stop=self.wait.until(EC.presence_of_all_elements_located((By.XPATH,\"//span[contains(text(),'Non Stop') or contains(text(),'1 Stop') or contains(text(),'2 Stop')]\")))\n        for stop in all_stop:\n            print(\"the Text\", stop.text)\n            assert stop.text =='1 Stop'\n            print(\"assert Pass\")\n\n\n","repo_name":"git-pardeep/selenium-framework","sub_path":"testcases/test_searchfiles.py","file_name":"test_searchfiles.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"41660816628","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nimport datetime\nimport pathlib\nimport re\nfrom sequencing_run.ssh_command import ssh_command\nfrom sequencing_run.sample_sheet import index_barcode_key_to_fields\nfrom sequencing_run.models import DemultiplexedSequencing, Flowcell, SequencingAnalysisRun\n\nfrom sequencing_run.assemble_libraries import prepare_to_assemble_libraries\nfrom sequencing_run.analysis import start_cromwell, ANALYSIS_COMMAND_LABEL\n\nclass Command(BaseCommand):\n\thelp = 'load demultiplexed index-barcode bams from a sequencing run into database'\n\t\n\tdef add_arguments(self, parser):\n\t\tparser.add_argument('--date_string', required=True)\n\t\tparser.add_argument('--name', required=True)\n\t\tparser.add_argument('--analysis_run', type=int, required=True)\n\t\tparser.add_argument('--start_analysis', action='store_true', help='start analysis after loading bams')\n\t\tparser.add_argument('--flowcell_by_lane', action='store_true', help='set to split flowcells into lanes. 
Used for Broad HiSeq or NovaSeq')\n\t\tparser.add_argument('--nuclear_subdirectory', help='directory for nuclear files under demultiplex directory')\n\t\tparser.add_argument('--mt_subdirectory', help='directory for mt files under demultiplex directory')\n\t\t\n\tdef handle(self, *args, **options):\n\t\tdate_string = options['date_string']\n\t\tname = options['name']\n\t\tdate = datetime.datetime.strptime(date_string, \"%Y%m%d\").date()\n\t\tstart_analysis = options['start_analysis']\n\t\tflowcell_by_lane = options['flowcell_by_lane']\n\t\tnuclear_subdirectory = options['nuclear_subdirectory'] if options['nuclear_subdirectory'] else settings.NUCLEAR_SUBDIRECTORY\n\t\tmt_subdirectory = options['mt_subdirectory'] if options['mt_subdirectory'] else settings.MT_SUBDIRECTORY\n\t\t\n\t\t# save flowcell for this SequencingAnalysisRun\n\t\tflowcell_text_ids = self.get_flowcell_text_ids(date_string, name, flowcell_by_lane)\n\t\tflowcell_objs = []\n\t\tfor flowcell_text_id in flowcell_text_ids:\n\t\t\tflowcell_obj, created = Flowcell.objects.get_or_create(flowcell_text_id=flowcell_text_id, sequencing_date=date)\n\t\t\tflowcell_objs.append(flowcell_obj)\n\t\t\t#print(flowcell_text_id)\n\t\t\t#print(flowcell_obj)\n\t\t\t\n\t\t# save the flowcell(s) as part of the analysis run, if there is one\n\t\t# make this optional so we can add demultiplexing results without having to start from interface\n\t\ttry:\n\t\t\tif options['analysis_run']:\n\t\t\t\tanalysis_run_id = options['analysis_run']\n\t\t\t\tanalysis_run = SequencingAnalysisRun.objects.get(id=analysis_run_id)\n\t\t\t\tfor flowcell_obj in flowcell_objs:\n\t\t\t\t\tanalysis_run.triggering_flowcells.add(flowcell_obj)\n\t\t\t\tanalysis_run.save()\n\t\t\t\t#print(analysis_run_id)\n\t\texcept SequencingAnalysisRun.DoesNotExist:\n\t\t\tpass\n\t\t\n\t\t# add demultiplexed bams to database\n\t\t#nuclear\n\t\t# TODO identify reference properly\n\t\tself.load_demultiplexed_bams_into_database(date_string, name, flowcell_objs, nuclear_subdirectory, 'hg19')\n\t\t#mt\n\t\tself.load_demultiplexed_bams_into_database(date_string, name, flowcell_objs, mt_subdirectory, 'rsrs')\n\t\t\n\t\tif start_analysis:\n\t\t\tanalysis_run = SequencingAnalysisRun.objects.get(id=analysis_run_id)\n\t\t\tflowcell_text_ids += [flowcell_object.flowcell_text_id for flowcell_object in analysis_run.prior_flowcells_for_analysis.all()]\n\t\t\tprepare_to_assemble_libraries(date_string, name, flowcell_text_ids)\n\t\t\tstart_result = start_cromwell(date_string, name, ANALYSIS_COMMAND_LABEL)\n\t\t\t# retrieve SLURM job number from output\n\t\t\tfor line in start_result.stdout.readlines():\n\t\t\t\tself.stdout.write(line)\n\t\t\t\tm = re.match('Submitted batch job[\\s]+(\\d+)', line)\n\t\t\t\tif m is not None:\n\t\t\t\t\tanalysis_run.slurm_job_number = int(m.group(1))\n\t\t\tanalysis_run.processing_state = SequencingAnalysisRun.RUNNING_ANALYSIS\n\t\t\tanalysis_run.save()\n\t\t\n\n\tdef load_demultiplexed_bams_into_database(self, date_string, name, flowcells, subdirectory, reference):\n\t\t# read the list bam files\n\t\tdirectory_str = \"{}/{}_{}/{}\".format(settings.DEMULTIPLEXED_PARENT_DIRECTORY, date_string, name, subdirectory)\n\t\tpathlist = pathlib.Path(directory_str).glob(\"*.bam\")\n\t\tfor filename in pathlist:\n\t\t\t#print(filename)\n\t\t\tbam_filename = filename.name\n\t\t\t#print(bam_filename)\n\t\t\t# filename contains index-barcode key\n\t\t\tkey = filename.stem\n\t\t\ti5, i7, p5, p7 = index_barcode_key_to_fields(key)\n\t\t\tbam_path = 
\"{}/{}_{}/{}/{}\".format(settings.DEMULTIPLEXED_PARENT_DIRECTORY, date_string, name, subdirectory, bam_filename)\n\t\t\tsequenced, created = DemultiplexedSequencing.objects.get_or_create(i5_index = i5, i7_index = i7, p5_barcode = p5, p7_barcode = p7, reference = reference, path = bam_path)\n\t\t\tfor flowcell in flowcells:\n\t\t\t\tsequenced.flowcells.add(flowcell)\n\t\t\tsequenced.save()\n\t\t\tself.stderr.write('{}\\tcreated: {}'.format(bam_filename, str(created)))\n\t\t\t\n\t# Find the flowcell text id from a demultiplexed flowcell, using the contents of the Illumina fastq headers\n\tdef get_flowcell_text_ids(self, date_string, name, flowcell_by_lane):\n\t\t# if we are running on the web host, try to read the file directly\n\t\tread_groups_file_path = \"{}/{}_{}/read_groups\".format(settings.DEMULTIPLEXED_PARENT_DIRECTORY, date_string, name)\n\t\t# now try with nonshortened\n\t\ttry:\n\t\t\twith open(read_groups_file_path) as f:\n\t\t\t\treturn self.read_flowcell_text_ids_from_file_contents(f, flowcell_by_lane)\n\t\texcept FileNotFoundError:\n\t\t\t# it looks like we are not on an orchestra/O2 web host, so ssh onto an O2 server to retrieve file\n\t\t\tcommand = \"cat {}\".format(read_groups_file_path)\n\t\t\tssh_result = ssh_command(settings.COMMAND_HOST, command, None, self.stderr)\n\t\t\tresult = ssh_result.stdout.readlines()\n\t\t\treturn self.read_flowcell_text_ids_from_file_contents(result, flowcell_by_lane)\n\t\treturn None\n\t\n\t# Retrieve the flowcell ID(s) from a list of lines\n\t# flowcell_by_lane is a boolean indicating whether to include lane numbers in IDs\n\t# output will look like 'HWCHLBGX3' or 'HWCHLBGX3.1' depending on this boolean\n\tdef read_flowcell_text_ids_from_file_contents(self, result, flowcell_by_lane):\n\t\t# example line:\n\t\t# PM:NS500217 PU:HWCHLBGX3.488.1\n\t\tflowcell_text_ids = []\n\t\tfor line in result:\n\t\t\tfields = line.split()\n\t\t\tplatform_unit = fields[1]\n\t\t\tif platform_unit.startswith('PU:'):\n\t\t\t\tplatform_unit_fields = re.split('\\.|\\n', platform_unit[3:])\n\t\t\t\tflowcell_text_id = platform_unit_fields[0]\n\t\t\t\tlane = platform_unit_fields[2]\n\t\t\t\tif flowcell_by_lane:\n\t\t\t\t\tflowcell_text_id += '.{}'.format(lane)\n\t\t\t\tif flowcell_text_id not in flowcell_text_ids:\n\t\t\t\t\tflowcell_text_ids.append(flowcell_text_id)\n\t\treturn flowcell_text_ids\n","repo_name":"DReichLab/waldo","sub_path":"sequencing_run/management/commands/load_demultiplexed.py","file_name":"load_demultiplexed.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"} +{"seq_id":"12450254506","text":"import train as train\nimport sys, os\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\n\nif len(sys.argv) <= 1:\n quit()\n\nimage_size = 50\ninput_dir = 'images'\ncategories = [name for name in os.listdir(input_dir) if name != \".DS_Store\"]\n\nX = []\nfor file_name in sys.argv[1:]:\n img = Image.open(file_name)\n img = img.convert(\"RGB\")\n img = img.resize((image_size, image_size))\n in_data = np.asarray(img)\n X.append(in_data)\n\nX = np.array(X)\n\nmodel = train.TrainModel().train(X.shape[1:])\nmodel.load_weights(\"./model/flower-model.hdf5\")\n\npredict = model.predict(X)\n\nfor pre in predict:\n y = pre.argmax()\n print(\"花の名前 : \", 
categories[y])\n\n\n\n\n","repo_name":"tsunaki00/flower","sub_path":"python/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"1573591899","text":"import gensim\nimport nltk\nimport re\n\nclass SentencesSemanticSimilarity():\n\n query = \"\"\n sentencesSet = []\n sanitizedSentencesSet = []\n dictionary = {}\n corpus = []\n processedQuery = \"\"\n\n def __init__(self, query, sentencesSet):\n self.query = query\n self.sentencesSet = sentencesSet\n self.sanitizedSentencesSet = self.preProcessSentences()\n self.dictionary = self.createDictionary()\n self.corpus = self.createCorpus()\n\n\n #Sanitize and tokenize\n #A document will now be a list of tokens.\n def preProcessSentences(self):\n processedSentences = []\n\n\n #Keep alphanumeric\n for sentence in self.sentencesSet:\n sentence = re.sub(r'^[a-zA-Z0-9\\s]', '', sentence)\n\n\n #Tokenize\n processedSentences = [[w.lower() for w in nltk.word_tokenize(sentence)] for sentence in self.sentencesSet]\n\n return processedSentences\n\n\n\n #We will create a dictionary from a list of documents. A dictionary maps every word to a number.\n def createDictionary(self):\n return gensim.corpora.Dictionary(self.preProcessSentences())\n\n\n\n #Now we will create a corpus. A corpus is a list of bags of words.\n # A bag-of-words representation for a document just lists the number of times each word occurs in the document.\n def createCorpus(self):\n return [self.dictionary.doc2bow(sentence) for sentence in self.preProcessSentences()]\n\n\n #calculate similarity\n def getSimilarity(self):\n # we create a tf-idf model from the corpus. Note that num_nnz is the number of tokens.\n tf_idf = gensim.models.TfidfModel(self.corpus)\n s = 0\n for i in self.corpus:\n s += len(i)\n\n # we will create a similarity measure object in tf-idf space.\n # tf-idf stands for term frequency-inverse document frequency. Term frequency\n # is how often the word shows up in the document and inverse document fequency scales the value by how rare the word is in the corpus.\n sims = gensim.similarities.Similarity('/Users/amosmadalinneculau/Desktop/', tf_idf[self.corpus], num_features=len(self.dictionary))\n\n #now create a query document and convert it to tf-idf.\n self.query = re.sub(r'^[a-zA-Z0-9\\s]', '', self.query)\n self.query = [w.lower() for w in nltk.word_tokenize(self.query)]\n\n query_doc_bow = self.dictionary.doc2bow(self.query)\n query_doc_tf_idf = tf_idf[query_doc_bow]\n\n #Return an array of document similarities to query. 
We see that the second document\n # is the most similar with the overlapping of socks and force.\n return sims[query_doc_tf_idf]\n\n# import ssl\n#\n# try:\n# _create_unverified_https_context = ssl._create_unverified_context\n# except AttributeError:\n# pass\n# else:\n# ssl._create_default_https_context = _create_unverified_https_context\n#\n# nltk.download()\n\n\ntest = SentencesSemanticSimilarity(\"Socks are a force for good.\",\n [\"I'm taking the show on the road.\",\n \"My socks are a force multiplier.\",\n \"I am the barber who cuts everyone's hair who doesn't cut their own.\",\n \"Legend has it that the mind is a mad monkey.\",\n \"I make my own fun.\"])\n\nprint(test.getSimilarity())","repo_name":"Amos94/SentencesSemanticSimilarity","sub_path":"SentencesSemanticSimilarity.py","file_name":"SentencesSemanticSimilarity.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"20909707477","text":"#!/usr/bin/env python3\n\nimport datetime as dt\nimport argparse\nimport os, sys\nimport numpy as np\nfrom netCDF4 import Dataset\n\ndef parseCommandLine():\n parser = argparse.ArgumentParser(description=(\"merge daily salinity restore files\"))\n parser.add_argument(\"--template_file_path\", required=True, help=(\n \"MOM6 template file to which we write daily salinity\"))\n parser.add_argument(\"--start_date\", required=True, help=())\n parser.add_argument(\"--end_date\", required=True, help=())\n parser.add_argument(\"--daily_sal_dir\", required=True, help=(\n \"directory name to store daily salinity file\"))\n parser.add_argument(\"--daily_sal_file_name\", required=True, help=(\n \"something like sal_YYYYMMDD.nc where YYYYMMDD will be replaced with actual dates\"))\n parser.add_argument(\"--output_file_name\", required=False, type=str, help=(\n \"rename template file after finishing output\"))\n args = parser.parse_args()\n\n args.start_date = dt.datetime.strptime(args.start_date,\"%Y%m%d\")\n args.end_date = dt.datetime.strptime(args.end_date,\"%Y%m%d\")\n\n\n args.template_file_path = os.path.abspath(args.template_file_path)\n if not os.path.exists(args.template_file_path):\n raise Exception(\"template_file_path ({}) does not exist\".format(args.template_file_path))\n sys.exit(1)\n\n args.daily_sal_dir = os.path.abspath(args.daily_sal_dir)\n if not os.path.exists(args.daily_sal_dir):\n raise Exception(\"daily_sal_dir ({}) does not exist\".format(args.daily_sal_dir))\n sys.exit(2)\n\n if args.output_file_name is not None:\n if os.path.exists(args.output_file_name): \n raise Exception(\"output_file_name ({}) already exists\".format(args.output_file_name))\n sys.exit(3)\n\n\n print(args)\n return args\n\ndef main(args):\n ndays = (args.end_date - args.start_date).days + 1\n cdate = args.start_date\n\n while cdate <= args.end_date:\n if cdate.day == 1: print(\"reading daily file: \"+cdate.strftime(\"%Y%m%d\"))\n\n sal_daily_file = args.daily_sal_file_name.replace(\"YYYYMMDD\",cdate.strftime(\"%Y%m%d\"))\n sal_daily_path = os.path.join(args.daily_sal_dir, sal_daily_file)\n if not os.path.exists(sal_daily_path):\n raise Exception(\"daily salnity file ({}) does not exist\".format(sal_daily_path))\n sys.exit(4)\n\n f = Dataset(sal_daily_path)\n wk2d = f.variables['sss_remapped_mom'][:]\n f.close()\n\n if cdate == args.start_date:\n nlat, nlon = wk2d.shape\n print(\"nlat,nlon=\", nlat,nlon)\n ndays = (args.end_date - args.start_date).days + 1\n sal_aggr = np.zeros((ndays, nlat, nlon))\n day_aggr = 
np.zeros(ndays)\n base_date = dt.datetime(1900,1,1,0,0,0)\n \n idx_day = (cdate - args.start_date).days\n sal_aggr[idx_day,:,:] = wk2d\n day_aggr[idx_day] = (cdate - base_date).days\n\n cdate += dt.timedelta(days=1)\n\n f = Dataset(args.template_file_path,\"r+\")\n f.variables[\"SALT\"][:] = sal_aggr.astype(np.float32)\n f.variables[\"time\"][:] = day_aggr.astype(np.float32)\n f.close()\n\n if args.output_file_name is not None:\n os.rename(args.template_file_path, args.output_file_name)\n\n\nif __name__ == '__main__':\n args = parseCommandLine()\n main(args)\n","repo_name":"UMD-AOSC/Ocean-LETKF","sub_path":"utils/obs_proc/level-4/aggregate_sal_restore_daily.py","file_name":"aggregate_sal_restore_daily.py","file_ext":"py","file_size_in_byte":3356,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"46"} +{"seq_id":"24556572677","text":"import time\n\nimport tushare as ts\n\n# today_trade_data=ts.get_day_all('2019-03-29')\n# today_trade_data=ts.get_day_all('2019-04-01')\n# # today_trade_data.to_csv('/root/tushare/20190401/daytrade_data.csv')\n# # today_trade_data.to_csv('E:\\PythonProject\\stockPortrait\\daytrade_data.csv', encoding='utf-8', index=False,header=False)\n# print(today_trade_data)\n# today_trade_data.describe\n\npro = ts.pro_api('4d7357aee9bef99c3b5d61f37a3451535f4cdd6a63fe45e3b0080c4e')\n# df = pro.daily(trade_date='20190401')\n# print(df)\n# print(pro.daily(trade_date='20190402'))\n# pro.query()\n# print(pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date'))\nnow = time.strftime(\"%Y%m%d\", time.localtime())\ntrade_cal = pro.trade_cal(exchange='', start_date='20190101',end_date=now)\n# print(trade_cal)\nwrite_count=0\nfor index,row in trade_cal.iterrows():\n\t# print(rowa)\n\t#print(row[-1],row[-2])\n\tif write_count==0:\n\t\tmode = 'w'\n\telse:\n\t\tmode = 'a'\n\tif row[-1]==1:\n\t\twrite_count+=1\n\t\tdaily=pro.daily(trade_date=row[-2])\n\t\tprint(row[-2])\n\t\tdaily.to_csv('E:\\PythonProject\\stockPortrait\\daytrade_data.csv', encoding='utf-8', index=False,header=False,mode=mode)\n\n\n\n# today_trade_data=pd.read_csv('/root/tushare/20190401/daytrade_data.csv',index_col='code')\n# today_trade_data.drop([ 'Unnamed: 0'],axis=1,inplace=True)\n# today_trade_data.to_csv('/root/tushare/20190401/result.csv')\n# print(today_trade_data)\n# print(today_trade_data.describe())\n\n","repo_name":"wanwanpp/stock-portrait-data","sub_path":"dao/DailyTradeOld.py","file_name":"DailyTradeOld.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"13014449793","text":"import copy\n\nf = open('input', 'r')\n\nsequence = [c for l in f.readlines() for c in l]\n\nprint(len(sequence))\n\nf.close()\n\nshapes = [\n [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 1, 1]\n ],\n [\n [0, 0, 0, 0],\n [0, 1, 0, 0],\n [1, 1, 1, 0],\n [0, 1, 0, 0]\n ],\n [\n [0, 0, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 1, 0],\n [1, 1, 1, 0]\n ],\n [\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0],\n [1, 0, 0, 0]\n ],\n [\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [1, 1, 0, 0],\n [1, 1, 0, 0]\n ]\n]\n\ngrid = [[0 for i in range(7)] for j in range(10000 * 4)]\n\ngrid.append([1 for i in range(7)])\n\ntop_of_tower = len(grid) - 1\n\noverall_moves = 0\n\nfor i in range(10000):\n # start 3 squares away from top of tower\n starting_height = top_of_tower - 3\n\n # start offset by 2\n offset = 2\n \n shape_index = i % len(shapes)\n\n shape = 
shapes[shape_index]\n\n k = 0\n\n stopped = False\n\n while starting_height + k < len(grid) and not stopped:\n movement = sequence[overall_moves % len(sequence)]\n\n valid = True\n\n if movement == '<':\n for h, line in enumerate(shape):\n for w, val in enumerate(line):\n y, x = starting_height + k - 4 + h, offset - 1 + w\n\n if x < 0 or x >= len(grid[0]):\n if val:\n valid = False\n continue\n\n g = grid[y][x]\n if g & val:\n valid = False\n\n if valid:\n offset -= 1\n\n if movement == '>':\n\n for h, line in enumerate(shape):\n for w, val in enumerate(line):\n y, x = starting_height + k - 4 + h, offset + 1 + w\n\n if x < 0 or x >= len(grid[0]):\n if val:\n valid = False\n continue\n\n g = grid[y][x]\n if g & val:\n valid = False\n\n if valid: \n offset += 1\n\n overall_moves += 1\n\n went_down = True\n\n for h, line in enumerate(shape):\n for w, val in enumerate(line):\n if starting_height + k + 1 - 4 + h >= len(grid):\n went_down = not val\n continue\n\n # wouldn't have been positioned like this if not valid\n if offset + w >= 0 and offset + w < len(grid[0]): \n g = grid[starting_height + k + 1 - 4 + h][offset + w]\n if g & val:\n went_down = False\n\n if went_down: \n k += 1\n else:\n stopped = True\n\n new_starting_height = starting_height\n\n # update grid with where the thing is \n for h in range(4):\n for w in range(4):\n y, x = starting_height + k - 4 + h, offset + w\n\n if y < len(grid) and y >= 0 and x < len(grid[0]) and x >= 0:\n grid[y][x] |= shape[h][w]\n if grid[y][x]:\n top_of_tower = min(starting_height + k - 4 + h, top_of_tower)\n\nfor j, line in enumerate(grid):\n print(['#' if c else '.' for c in line])\n\nprint(len(grid) - top_of_tower - 1)\n\n\n\n","repo_name":"smonfourny/advent-2022","sub_path":"17/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"46"} +{"seq_id":"7403790302","text":"import os\n\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom easygraph.classes.graph import Graph\n\nfrom .eg_dataset import EasyGraphBuiltinDataset\nfrom .utils import _get_dgl_url\nfrom .utils import _set_labels\nfrom .utils import data_type_dict\nfrom .utils import tensor\n\n\n__all__ = [\n \"AmazonCoBuyComputerDataset\",\n]\n\n\nclass GNNBenchmarkDataset(EasyGraphBuiltinDataset):\n r\"\"\"Base Class for GNN Benchmark dataset\n\n Reference: https://github.com/shchur/gnn-benchmark#datasets\n \"\"\"\n\n def __init__(\n self, name, raw_dir=None, force_reload=False, verbose=True, transform=None\n ):\n _url = _get_dgl_url(\"dataset/\" + name + \".zip\")\n super(GNNBenchmarkDataset, self).__init__(\n name=name,\n url=_url,\n raw_dir=raw_dir,\n force_reload=force_reload,\n verbose=verbose,\n transform=transform,\n )\n\n def process(self):\n npz_path = os.path.join(self.raw_path, self.name + \".npz\")\n g = self._load_npz(npz_path)\n # g = transforms.reorder_graph(\n # g, node_permute_algo='rcmk', edge_permute_algo='dst', store_ids=False)\n self._graph = g\n self._data = [g]\n self._print_info()\n\n def has_cache(self):\n graph_path = os.path.join(self.save_path, \"dgl_graph_v1.bin\")\n if os.path.exists(graph_path):\n return True\n return False\n\n # def save(self):\n # graph_path = os.path.join(self.save_path, 'dgl_graph_v1.bin')\n # save_graphs(graph_path, self._graph)\n #\n # def load(self):\n # graph_path = os.path.join(self.save_path, 'dgl_graph_v1.bin')\n # graphs, _ = load_graphs(graph_path)\n # self._graph = graphs[0]\n # self._data = [graphs[0]]\n # 
self._print_info()\n\n def _print_info(self):\n if self.verbose:\n print(\" NumNodes: {}\".format(self._graph.number_of_nodes()))\n print(\" NumEdges: {}\".format(2 * self._graph.number_of_edges()))\n print(\" NumFeats: {}\".format(self._graph.ndata[\"feat\"].shape[-1]))\n print(\" NumbClasses: {}\".format(self.num_classes))\n\n def _load_npz(self, file_name):\n with np.load(file_name, allow_pickle=True) as loader:\n loader = dict(loader)\n num_nodes = loader[\"adj_shape\"][0]\n adj_matrix = sp.csr_matrix(\n (loader[\"adj_data\"], loader[\"adj_indices\"], loader[\"adj_indptr\"]),\n shape=loader[\"adj_shape\"],\n ).tocoo()\n\n if \"attr_data\" in loader:\n # Attributes are stored as a sparse CSR matrix\n attr_matrix = sp.csr_matrix(\n (\n loader[\"attr_data\"],\n loader[\"attr_indices\"],\n loader[\"attr_indptr\"],\n ),\n shape=loader[\"attr_shape\"],\n ).todense()\n elif \"attr_matrix\" in loader:\n # Attributes are stored as a (dense) np.ndarray\n attr_matrix = loader[\"attr_matrix\"]\n else:\n attr_matrix = None\n\n if \"labels_data\" in loader:\n # Labels are stored as a CSR matrix\n labels = sp.csr_matrix(\n (\n loader[\"labels_data\"],\n loader[\"labels_indices\"],\n loader[\"labels_indptr\"],\n ),\n shape=loader[\"labels_shape\"],\n ).todense()\n elif \"labels\" in loader:\n # Labels are stored as a numpy array\n labels = loader[\"labels\"]\n else:\n labels = None\n if hasattr(adj_matrix, \"format\"):\n print(\"can be generate eg!\")\n g = Graph(incoming_graph_data=adj_matrix)\n # g = transforms.to_bidirected(g)\n g = _set_labels(g, labels)\n g.ndata[\"feat\"] = tensor(attr_matrix, data_type_dict()[\"float32\"])\n g.ndata[\"label\"] = tensor(labels, data_type_dict()[\"int64\"])\n return g\n\n @property\n def num_classes(self):\n \"\"\"Number of classes.\"\"\"\n raise NotImplementedError\n\n def __getitem__(self, idx):\n r\"\"\"Get graph by index\n\n Parameters\n ----------\n idx : int\n Item index\n\n Returns\n -------\n :class:`dgl.DGLGraph`\n\n The graph contains:\n\n - ``ndata['feat']``: node features\n - ``ndata['label']``: node labels\n \"\"\"\n assert idx == 0, \"This dataset has only one graph\"\n if self._transform is None:\n return self._graph\n else:\n return self._transform(self._graph)\n\n def __len__(self):\n r\"\"\"Number of graphs in the dataset\"\"\"\n return 1\n\n\nclass AmazonCoBuyComputerDataset(GNNBenchmarkDataset):\n r\"\"\"'Computer' part of the AmazonCoBuy dataset for node classification task.\n\n Amazon Computers and Amazon Photo are segments of the Amazon co-purchase graph [McAuley et al., 2015],\n where nodes represent goods, edges indicate that two goods are frequently bought together, node\n features are bag-of-words encoded product reviews, and class labels are given by the product category.\n\n Reference: ``_\n\n Statistics:\n\n - Nodes: 13,752\n - Edges: 491,722 (note that the original dataset has 245,778 edges but DGL adds\n the reverse edges and remove the duplicates, hence with a different number)\n - Number of classes: 10\n - Node feature size: 767\n\n Parameters\n ----------\n raw_dir : str\n Raw file directory to download/contains the input data directory.\n Default: ~/.dgl/\n force_reload : bool\n Whether to reload the dataset. Default: False\n verbose : bool\n Whether to print out progress information. Default: True.\n transform : callable, optional\n A transform that takes in a :class:`~dgl.DGLGraph` object and returns\n a transformed version. 
The :class:`~dgl.DGLGraph` object will be\n transformed before every access.\n\n Attributes\n ----------\n num_classes : int\n Number of classes for each node.\n\n Examples\n --------\n >>> data = AmazonCoBuyComputerDataset()\n >>> g = data[0]\n >>> num_class = data.num_classes\n >>> feat = g.ndata['feat'] # get node feature\n >>> label = g.ndata['label'] # get node labels\n \"\"\"\n\n def __init__(self, raw_dir=None, force_reload=False, verbose=True, transform=None):\n super(AmazonCoBuyComputerDataset, self).__init__(\n name=\"amazon_co_buy_computer\",\n raw_dir=raw_dir,\n force_reload=force_reload,\n verbose=verbose,\n transform=transform,\n )\n\n @property\n def num_classes(self):\n \"\"\"Number of classes.\n\n Return\n -------\n int\n \"\"\"\n return 10\n","repo_name":"easy-graph/Easy-Graph","sub_path":"easygraph/datasets/gnn_benchmark.py","file_name":"gnn_benchmark.py","file_ext":"py","file_size_in_byte":6940,"program_lang":"python","lang":"en","doc_type":"code","stars":323,"dataset":"github-code","pt":"46"} +{"seq_id":"14450848007","text":"from run_shuffles import *\nfrom infile_parser import read_infiles\n\nHANDSIZE = 7\nPRIZEAMOUNT = 6\n\n\ndef __main__(infile_decklist, infile_relations, amount=20000):\n relations, basics, DECKLIST = read_infiles(infile_decklist, infile_relations)\n nobasic_amount = run_shuffles(relations, basics, amount, DECKLIST)\n print('Odds for basic in percentile:')\n print(round(100 - nobasic_amount / amount * 100, 2))\n for relation in relations:\n print(relation)\n print(round(relations[relation] / amount * 100, 2))\n print('\\nByeBye')\n\n\n__main__('Decklist.csv', 'Relations.txt')\n","repo_name":"FusionDota2/PokemonTCG_Calculator","sub_path":"BackEnd/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"33722497507","text":"import sys\nimport numpy as np\nimport json\nimport argparse\nparser = argparse.ArgumentParser(description='Input')\nparser.add_argument('-input', action='store', dest='input_file_path',help='input file')\nargs = parser.parse_args()\n\nwith open(args.input_file_path, 'rb') as fp:\n data = fp.read()\n#print(data)\ndata=str(data)\n\nimport pickle\ndef count(data):\n data_=data.split(\"}{\")[:-1]\n data_[0]=data_[0][3:]\n key_list=[]\n for i in range(len(data_)):\n data_[i]=\"{\"+data_[i]+\"}\"\n json_obj=data_[i]\n json_obj=json.loads(json_obj)\n tmplist=list(json_obj.keys())\n key_list.append(','.join(tmplist))\n return key_list\nkey_list=count(data)\nkey_final=[]\nfor i in key_list:\n if i not in key_final:\n key_final.append(i)\nprint(key_final)\nwith open('hpack_key_audit.pickle','wb') as f:\n pickle.dump(key_final,f)\n\n","repo_name":"RU-System-Software-and-Security/ELISE-2021","sub_path":"audit/extract_audit.py","file_name":"extract_audit.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"46"} +{"seq_id":"28498423440","text":"alphabet = 'abcdefghijklmnopqrstuvwxyz'\nanswer = -3\nOriginalMessage = ''\n\nmessage = input ('Please enter a message: ')\n\n\nfor character in message:\n if character in alphabet :\n position = alphabet.find(character)\n OriginalPosition = (position + answer)%26\n OriginalCharacter = alphabet[OriginalPosition]\n OriginalMessage += OriginalCharacter\n else :\n OriginalMessage += character\n \nprint ('Your decrypted message is ' , OriginalMessage)\n 
\n","repo_name":"stiblook/CodeClub","sub_path":"Python/MessageDecrypter.py","file_name":"MessageDecrypter.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28951941209","text":"money = int(input(\"Money:\"))\nper_cent = {'ТКБ': 5.6, 'СКБ': 5.9, 'ВТБ': 4.28, 'СБЕР': 4.0}\nprocent = list(per_cent.values())\nbank_ТКБ = round(procent[0] / 100 * money)\nbank_СКБ = round(procent[1] / 100 * money)\nbank_ВТБ = round(procent[2] / 100 * money)\nbank_СБЕР = round(procent[3] / 100 * money)\ndeposit = [bank_ТКБ, bank_СКБ, bank_ВТБ, bank_СБЕР]\nprint(\"Накопленные средства за год вклада в каждом из банков —\", deposit)\nprint(\"Максимальная сумма, которую вы можете заработать —\", max(deposit))","repo_name":"23Andrey23/Skill_Factory","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"257775708","text":"'''\nMake an alt/az plot of one or multiple objects for a given date from a given location.\n\nterminal execution: \n    if supplying a list of objects:\n        python PlotObserving.py --SimbadName \"HD 22259\",\"DS Tuc\",\"TYC 26-39-1\" --LocationName \"Las Campanas Observatory\" --DateString \"2022-12-10\" --UTCOffset 3 --filename test.png\n    if supplying a single object: \n        python PlotObserving.py --SimbadName \"HD 22259\" --LocationName \"Las Campanas Observatory\" --DateString \"2022-12-10\" --UTCOffset 3 --filename test.png\n'''\n\ndef PlotObserving(SimbadName,DateString,LocationName,UTCOffset,\n                plt_style = 'default',\n                savefig = False,\n                filename = 'observing_plot.png',\n                form = 'png',\n                dpi = 300,\n                figsize=(7, 6),\n                cmaps = ['Blues','Oranges','Purples','Reds','Greens']\n                ):\n    ''' Make an alt/az plot of one or multiple objects for a given date from a given location.\n    Args:\n        SimbadName: [str or list of strings]: Simbad resolvable name of object(s)\n        DateString [str]: A string of date. Ex: '2022-12-10'\n        LocationName [str]: Name of Earth Location recognized by astropy. 
Ex: 'Las Campanas Observatory'\n UTCOffset [int or flt]: offset of location from UTC\n plt_style [str]: specify the matplotlib plot style to use\n savefig [bool]: set to True to save the figure to file\n filename [str]: output file name is saving figure\n dpi [int]: dpi for fig\n figsize [tuple]: figure size\n cmaps [list]: colormaps to use for plotting motion of objects\n '''\n from astropy.coordinates import get_sun, get_moon\n from astropy.coordinates import EarthLocation, AltAz, SkyCoord\n from astropy.time import Time\n import numpy as np\n import astropy.units as u\n import matplotlib.pyplot as plt\n plt.style.use(plt_style)\n\n \n SimbadName = SimbadName.split(',')\n\n nobs = len(SimbadName)\n #objects = SkyCoord.from_name(SimbadName)\n oblist = []\n for i in range(len(SimbadName)):\n ob = SkyCoord.from_name(SimbadName[i])\n oblist.append(ob)\n\n utc_offset = UTCOffset*u.hour\n location = EarthLocation.of_site(LocationName)\n\n TimeString = DateString + ' 00:00:00'\n\n midtime = Time(TimeString,scale='utc')+utc_offset\n # Establish times:\n midnight = midtime\n delta_midnight = np.linspace(-12, 12, 1000)*u.hour\n times = midnight + delta_midnight\n # Establish new AltAz frame at given location:\n altazframe = AltAz(obstime=times, location=location)\n # Sun, Moon, and all inputed object's into AltAz frame:\n sunaltazs = get_sun(times).transform_to(altazframe)\n moonaltazs = get_moon(times).transform_to(altazframe) \n \n # Make the plot:\n fig = plt.figure(figsize=figsize)\n # Plot sun and moon:\n plt.plot(delta_midnight, sunaltazs.alt, color='orange', label='Sun')\n plt.plot(delta_midnight, moonaltazs.alt, color='darkgrey', label='Moon')\n \n # Plot object(s):\n for i,objects in enumerate(oblist):\n obaltazs = objects.transform_to(altazframe)\n plt.scatter(delta_midnight, obaltazs.alt, c=obaltazs.az, cmap=cmaps[i],label=SimbadName[i],s=8,lw=0)\n \n plt.fill_between(delta_midnight.value, 0, 90, sunaltazs.alt.value < -0, color='0.5', zorder=0)\n plt.fill_between(delta_midnight.value, 0, 90, sunaltazs.alt.value < -18, color='k', zorder=0)\n plt.ylim(0,90)\n plt.xticks(np.arange(13)*2 -12)\n leg = plt.legend(loc='upper left')\n plt.colorbar().set_label('Azimuth [deg]')\n plt.xlabel('Hours from Local Midnight') \n plt.ylabel('Altitude [deg]')\n #plt.annotate(midtime.datetime, xy=(0.14,0.15),xycoords = 'figure fraction',fontsize=8)\n plt.grid(ls=':')\n plt.show()\n plt.close(fig)\n if savefig == True:\n plt.savefig(filename, format=form, dpi=dpi)\n return fig\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--SimbadName', type=str, required=True, help='Supply the Simbad name. If supplying a list, enclose each in double quotes separated by a comma with no space. Ex: --SimbadName \"HD 22259\",\"DS Tuc\",\"TYC 26-39-1\"')\nparser.add_argument('--LocationName', type=str, required=True, help='Astropy Earth Location recognizeable name, enclose in double quotes to include spaces. Ex: --LocationName \"Las Campanas Observatory\"')\nparser.add_argument('--DateString', type=str, required=True, help='Date of observation in YYYY-MM-DD')\nparser.add_argument('--UTCOffset', type=int, required=True, help='UTC Offset of location in hours')\nparser.add_argument('--filename', type=str, help='Filename for saved plot. 
If not provided, name will be SimbadName_DateString_ObservingPlot.png')\n\nargs = parser.parse_args()\nSimbadName = args.SimbadName\nLocationName = args.LocationName\nDateString = args.DateString\nUTCOffset = args.UTCOffset\nif args.filename:\n filename = args.filename\nelse:\n filename = SimbadName + '_' + DateString + '_ObservingPlot.png'\n\n\nax = PlotObserving(SimbadName,DateString,LocationName,UTCOffset,savefig=True,filename=filename)","repo_name":"logan-pearce/myastrotools","sub_path":"myastrotools/PlotObserving.py","file_name":"PlotObserving.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"4912266570","text":"from confluent_kafka import Consumer, KafkaError\nimport time\nimport matplotlib.pyplot as plt\n\n\nlatency = []\n\nc = Consumer({\n 'bootstrap.servers': 'localhost:9092',\n 'group.id': 'testTopic',\n 'auto.offset.reset': 'latest',\n 'fetch.wait.max.ms' : 1,\n 'metadata.request.timeout.ms' : 1000})\n\n#fetch.wait.max.ms : 5\n\ndef print_assignment(consumer, partitions):\n print('Assignment:', partitions)\n\nc.subscribe(['testTopic'],on_assign=print_assignment)\n\nm_number = 0\ncalled = True\n\n\nwhile True:\n msg = c.poll(30)\n\n\n if msg is None:\n break;\n if msg.error():\n print(\"Consumer error: {}\".format(msg.error()))\n continue\n\n if called is True:\n start = time.time()\n called = False\n\n m_time = int(round(time.time() * 1000)) - msg.timestamp()[1]\n #print(m_time)\n latency.append(m_time)\n m_number +=1\n\n\n\n\n\n\nprint(\"Average latency : {0}\".format(sum(latency)/m_number))\nplt.plot(latency)\nplt.ylabel('Letency : [ms]')\nplt.xlabel('Number of messages')\nplt.show()\n\n\nc.close()\n","repo_name":"Steeew47/Kafka","sub_path":"ConsC.py","file_name":"ConsC.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"19020590274","text":"from enarksh.DataLayer import DataLayer\nfrom enarksh.controller.node.ComplexNode import ComplexNode\n\n\nclass ScheduleNode(ComplexNode):\n \"\"\"\n Class for objects in the controller of type 'Schedule'.\n \"\"\"\n\n # ------------------------------------------------------------------------------------------------------------------\n def __init__(self, node_data):\n \"\"\"\n Object constructor.\n\n :param dict node_data:\n \"\"\"\n ComplexNode.__init__(self, node_data)\n\n self._run_id = 0\n \"\"\"\n The ID of the run of this schedule node.\n\n :type: int\n \"\"\"\n\n # ------------------------------------------------------------------------------------------------------------------\n def initialize(self,\n node_data,\n schedule,\n resources,\n resources_data,\n consumptions,\n consumptions_data,\n run_nodes,\n child_nodes,\n direct_predecessors,\n direct_successors,\n successors):\n \"\"\"\n :param dict node_data:\n :param dict schedule:\n :param dict resources:\n :param dict resources_data:\n :param dict consumptions:\n :param dict consumptions_data:\n :param dict run_nodes:\n :param dict child_nodes:\n :param dict direct_predecessors:\n :param dict direct_successors:\n :param dict successors:\n \"\"\"\n ComplexNode.initialize(self,\n node_data,\n schedule,\n resources,\n resources_data,\n consumptions,\n consumptions_data,\n run_nodes,\n child_nodes,\n direct_predecessors,\n direct_successors,\n successors)\n\n self._run_id = schedule['run_id']\n\n # 
 def sync_state(self):\n ComplexNode.sync_state(self)\n\n DataLayer.enk_back_run_update_status(self._run_id, self._rnd_datetime_start, self._rnd_datetime_stop)\n\n# ----------------------------------------------------------------------------------------------------------------------\n","repo_name":"SetBased/py-enarksh","sub_path":"enarksh/controller/node/ScheduleNode.py","file_name":"ScheduleNode.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"46"}
{"seq_id":"69805300939","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass DropBlock2D(nn.Module):\n \"\"\"\n reference from https://github.com/miguelvr/dropblock/blob/master/dropblock/dropblock.py\n \"\"\"\n def __init__(self, drop_prob, block_size):\n super(DropBlock2D, self).__init__()\n self.drop_prob = drop_prob\n self.block_size = block_size\n\n def forward(self, x):\n if not self.training or self.drop_prob == 0.:\n return x\n else:\n # get gamma value\n gamma = self._compute_gamma(x)\n\n # sample mask\n mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).float()\n\n # place mask on input device\n mask = mask.to(x.device)\n\n # compute block mask\n block_mask = self._compute_block_mask(mask)\n\n # apply block mask\n out = x * block_mask[:, None, :, :]\n\n # scale output\n out = out * block_mask.numel() / block_mask.sum()\n\n return out\n\n def _compute_block_mask(self, mask):\n # Max-pooling the seed mask with a block_size window expands each dropped seed\n # into a block_size x block_size square of zeros (after the inversion below).\n block_mask = F.max_pool2d(input=mask[:, None, :, :],\n kernel_size=(self.block_size, self.block_size),\n stride=(1, 1),\n padding=self.block_size // 2)\n\n if self.block_size % 2 == 0:\n block_mask = block_mask[:, :, :-1, :-1]\n\n block_mask = 1 - block_mask.squeeze(1)\n\n return block_mask\n\n def _compute_gamma(self, x):\n # Simplified Bernoulli seed rate; the DropBlock paper's gamma also scales by\n # feat_size**2 / (feat_size - block_size + 1)**2, which is omitted here.\n return self.drop_prob / (self.block_size ** 2)","repo_name":"zaynxalic/Unet-DRIVE","sub_path":"src/dropblock.py","file_name":"dropblock.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"}
{"seq_id":"32848570307","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim: tabstop=4 shiftwidth=4 expandtab number\n\"\"\"\nGiven two binary trees, imagine that when you overlay one of them onto the other, some nodes of the two trees overlap.\n\nMerge them into a single new binary tree. The merge rule: if two nodes overlap, their values are summed as the value of the merged node; otherwise the non-NULL node is used directly as the node of the new tree.\n\nExample 1:\n\nInput:\n\tTree 1 Tree 2\n 1 2\n / \\ / \\\n 3 2 1 3\n / \\ \\\n 5 4 7\nOutput:\nMerged tree:\n\t 3\n\t / \\\n\t 4 5\n\t / \\ \\\n\t 5 4 7\nNote: merging must start from the root nodes of both trees.\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/merge-two-binary-trees\nCopyright belongs to LeetCode; contact them for authorization before commercial reprinting, and cite the source for non-commercial reprints.\n\nAuthors: qianweishuo\nDate: 2019/11/12 5:39 PM\n\"\"\"\nfrom definitions import TreeNode\n\n\nclass Solution(object):\n def mergeTrees(self, t1, t2):\n if t1 is None and t2 is None:\n return None\n elif t1 is None:\n return t2\n elif t2 is None:\n return t1\n else:\n node = TreeNode(t1.val + t2.val)\n node.left = self.mergeTrees(t1.left, t2.left)\n node.right = self.mergeTrees(t1.right, t2.right)\n return node\n","repo_name":"koyo922/leetcode","sub_path":"p617_merge_two_bin_trees.py","file_name":"p617_merge_two_bin_trees.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"}
{"seq_id":"32506216625","text":"import numpy as np\nimport sys\n\nsys.setrecursionlimit(4000)\n\narr = []\n\n
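# Data layout (inferred from load() and the recurrence in K() below): each input\n# line holds two comma-separated (value, weight) pairs, so items land in arr as\n# consecutive pairs and K() settles one pair per step.\n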
open(\"ADS2022_cv5kradezDAT.txt\", \"r\") as f:\n for i in f:\n x = [int(j.strip()) for j in i.split(',')]\n arr.append([x[0], x[1]])\n arr.append([x[2], x[3]])\n\nload()\nW = 2000\nn = 2000\ntable = np.zeros([len(arr) + 1, len(arr) + 1], dtype=int)\n\ndef K(i, w):\n if i <= 0 or w <= 0:\n return 0\n global arr, table\n if(table[i][w] != 0):\n return table[i][w]\n table[i][w] = max(K(i - 2, w), arr[i][0] + K(i - 2, w - arr[i][1]), arr[i - 1][0] + K(i - 2, w-arr[i - 1][1])) \n return table[i][w]\n\npath = []\n\ndef reverseSearch(i, w):\n if i <= 0:\n return\n global table, path\n place = []\n max = table[i - 2][w]\n place = [i - 2, w, max]\n if(max < arr[i][0] + table[i - 2][w - arr[i][1]]):\n max = arr[i][0] + table[i - 2][w - arr[i][1]]\n palce = [i - 2, w - arr[i][1], max]\n if(max < arr[i - 1][0] + table[i - 2][w-arr[i - 1][1]]):\n max = arr[i-1][0] + table[i - 2][w-arr[i - 1][1]]\n palce = [i - 2, w - arr[i - 1][1], max]\n path.append(place)\n reverseSearch(place[0], place[1])\n\n\nprint(K(n - 1, W))\n# reverseSearch(n - 1 ,W)\n# print(path)\n# np.savetxt(\"table.txt\", table, fmt=\"%s\")\n","repo_name":"HrcanJan/Mugger","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"31203775119","text":"\"\"\"Seed file to make sample data for db.\"\"\"\n\nfrom models import Article, Board, Feed, Source, User, db\nfrom app import app\nfrom external_apis import news_sources\n\n# Create all tables\ndb.drop_all()\ndb.create_all()\n\n\n######################### update database ###############################\n\nsources = news_sources()\nfor source in sources:\n id = source['id']\n name = source['name']\n description = source['description']\n url = source['url']\n category = source['category']\n language = source['language']\n country = source['country']\n\n s = Source(id=id, name=name, description=description, url=url, category=category, language=language, country=country)\n db.session.add(s)\n db.session.commit()\n\n \n\n################### Make a bunch of tables for testing database schemas ######################\n\n\n# s1 = Source(id=\"news1\", name=\"NEWS1\", description=\"news1 news\", \n# url=\"www.news1.com\", category=\"general\", language=\"en\", country=\"us\")\n# s2 = Source(id=\"news2\", name=\"NEWS2\", description=\"news2 news\", \n# url=\"www.news2.com\", category=\"general\", language=\"en\", country=\"us\")\n# s3 = Source(id=\"news3\", name=\"NEWS3\", description=\"news3 news\", \n# url=\"www.news3.com\", category=\"general\", language=\"en\", country=\"gb\")\n# s4 = Source(id=\"news4\", name=\"NEWS4\", description=\"news4 news\", \n# url=\"www.news4.com\", category=\"sports\", language=\"en\", country=\"us\")\n# s5 = Source(id=\"news5\", name=\"NEWS5\", description=\"news5 news\", \n# url=\"www.news5.com\", category=\"general\", language=\"en\", country=\"us\")\n\n# db.session.add_all([s1, s2, s3, s4, s5])\n# db.session.commit()\n\n# # Make a bunch of articles\n# a1 = Article(source_id=\"news5\", author=\"Ben\", title=\"I like cat\", description=\"I am hungry\", \n# url=\"www.news5.com\", img_url=\"www.news5.com/img.\", published_at=\"2020-05-01\", content=\"somthing..\")\n# a2 = Article(source_id=\"news1\", author=\"Joe\", title=\"friendly animals\", description=\"dog are good friends\", \n# url=\"www.news1.com\", img_url=\"www.news1.com/img.\", published_at=\"2020-05-01\", content=\"anything..\")\n# a3 = Article(source_id=\"news4\", 
author=\"John\", title=\"baseball season\", description=\"no more sports\", \n# url=\"www.news4.com\", img_url=\"www.news4.com/img.\", published_at=\"2020-05-01\", content=\"whatever you want..\")\n# a4 = Article(source_id=\"news3\", author=\"Lisa\", title=\"random article\", description=\"not sure what is in\", \n# url=\"www.news3.com\", img_url=\"www.news3.com/img.\", published_at=\"2020-05-01\", content=\"random content..\")\n# a5 = Article(source_id=\"news2\", author=\"Monica\", title=\"a day in Ny\", description=\"around timesquare\", \n# url=\"www.news2.com\", img_url=\"www.news2.com/img.\", published_at=\"2020-05-01\", content=\"empire state..\")\n\n# db.session.add_all([a1, a2, a3, a4, a5])\n# db.session.commit()\n\n# # # Make a bunch of users\n\n# u1 = User.signup(username=\"firstuser\", email=\"one@email.com\", password=\"firstuser\", country='us')\n# u2 = User.signup(username=\"seconduser\", email=\"two@email.com\", password=\"seconduser\", country='br')\n# u3 = User.signup(username=\"thirduser\", email=\"three@email.com\", password=\"thirduser\", country='ca')\n# u4 = User.signup(username=\"fourthuser\", email=\"four@email.com\", password=\"fourthuser\", country='us')\n# u5 = User.signup(username=\"fifthuser\", email=\"five@email.com\", password=\"fifthuser\", country='ae')\n\n\n# db.session.add_all([u1, u2, u3, u4, u5])\n# db.session.commit()\n\n\n\n# # Add sample feeds\n# f1 = Feed(user_id=1, name=\"my likes\")\n# f2 = Feed(user_id=1, name=\"sports only\")\n# f3 = Feed(user_id=2, name=\"likes\")\n# f4 = Feed(user_id=2, name=\"keep eyes on\")\n# f5 = Feed(user_id=3, name=\"first feed\")\n\n# db.session.add_all([f1, f2, f3, f4, f5])\n# db.session.commit()\n\n\n# # Add sample boards\n# b1 = Board(user_id=1, name=\"must read\")\n# b2 = Board(user_id=1, name=\"read later\")\n# b3 = Board(user_id=3, name=\"second read\")\n# b4 = Board(user_id=3, name=\"reading list\")\n# b5 = Board(user_id=2, name=\"my board\")\n\n# db.session.add_all([b1, b2, b3, b4, b5])\n# db.session.commit()","repo_name":"prkshadhkr/news","sub_path":"seed.py","file_name":"seed.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"37350053898","text":"age = int(input('Veuillez saisir votre age svp !'))\r\npermis = int(input('Veuillez saisir depuis combien temps vous aviez votre permis !'))\r\naccident = int(input('Votre nombre daccidents !'))\r\nfidélité = int(input('Depuis combien de temps êtes vous chez nous?'))\r\n\r\ntarifs = ['rouge','orange','vert','bleu']\r\n\r\ncompteur = 0\r\n\r\ncompteur+=1 if age >=25 else compteur\r\ncompteur+=1 if permis >=2 else compteur\r\ncompteur -= accident\r\nvalide = compteur >= 0 and fidélité >= 5\r\n\r\ncompteur += 1 if valide else 0\r\nprint (compteur)\r\nprint (f'Vous avez accès au tarif {tarifs[compteur]}') if compteur >= 0 else print('Vous êtes refusé !')","repo_name":"Weldarn/algo_python","sub_path":"exo_python/exo6.5.py","file_name":"exo6.5.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"} +{"seq_id":"28734957744","text":"def is_dicom_file(filepath):\n \"\"\"\n Tries to read the file using dicom.read_file,\n if the file exists and dicom.read_file does not raise\n and Exception returns True. 
","repo_name":"Weldarn/algo_python","sub_path":"exo_python/exo6.5.py","file_name":"exo6.5.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"}
{"seq_id":"28734957744","text":"import os\nimport logging\n\nimport dicom # pydicom's pre-1.0 module name, matching dicom.read_file below\n\nlog = logging.getLogger(__name__)\n\n\ndef is_dicom_file(filepath):\n \"\"\"\n Tries to read the file using dicom.read_file;\n if the file exists and dicom.read_file does not raise\n an Exception, returns True. False otherwise.\n\n :param filepath: str\n Path to DICOM file\n\n :return: bool\n \"\"\"\n if not os.path.exists(filepath):\n raise IOError('File {} not found.'.format(filepath))\n\n filename = os.path.basename(filepath)\n if filename == 'DICOMDIR':\n return False\n\n try:\n _ = dicom.read_file(filepath)\n except Exception:\n log.debug('Checking if {0} was a DICOM, but returned '\n 'False.'.format(filepath))\n return False\n\n return True","repo_name":"MichaelFu1998-create/security_scanning","sub_path":"codesearchnet/codesearchnet_20572.py","file_name":"codesearchnet_20572.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"46"}
{"seq_id":"73227116618","text":"\"\"\"added instructor evaluation models\n\nRevision ID: 3e414d100b03\nRevises: e2288893e298\nCreate Date: 2023-12-20 07:17:09.350865\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '3e414d100b03'\ndown_revision = 'e2288893e298'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('eduqa_instructor_evaluation_categories',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('category', sa.String(), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('eduqa_instructor_evaluation_choices',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('score', sa.Numeric(), nullable=True),\n sa.Column('label', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('eduqa_instructor_evaluation_items',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('number', sa.Integer(), nullable=True),\n sa.Column('question', sa.Text(), nullable=False),\n sa.Column('note', sa.Text(), nullable=True),\n sa.Column('category_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['category_id'], ['eduqa_instructor_evaluation_categories.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('eduqa_instructor_evaluations',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('course_id', sa.Integer(), nullable=True),\n sa.Column('instructor_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['course_id'], ['eduqa_courses.id'], ),\n sa.ForeignKeyConstraint(['instructor_id'], ['eduqa_course_instructors.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('eduqa_instructor_evaluation_results',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('evaluation_id', sa.Integer(), nullable=True),\n sa.Column('choice_id', sa.Integer(), nullable=True),\n sa.Column('evaluation_item_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['choice_id'], ['eduqa_instructor_evaluation_choices.id'], ),\n sa.ForeignKeyConstraint(['evaluation_id'], ['eduqa_instructor_evaluations.id'], ),\n sa.ForeignKeyConstraint(['evaluation_item_id'], ['eduqa_instructor_evaluation_items.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n
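 # Tables are dropped in reverse creation order so foreign-key references are\n # removed before the tables they point to.\n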
 op.drop_table('eduqa_instructor_evaluation_results')\n op.drop_table('eduqa_instructor_evaluations')\n op.drop_table('eduqa_instructor_evaluation_items')\n op.drop_table('eduqa_instructor_evaluation_choices')\n op.drop_table('eduqa_instructor_evaluation_categories')\n # ### end Alembic commands ###\n","repo_name":"MUMT-IT/mis2018","sub_path":"migrations/versions/3e414d100b03_added_instructor_evaluation_models.py","file_name":"3e414d100b03_added_instructor_evaluation_models.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"46"}