diff --git "a/4857.jsonl" "b/4857.jsonl" new file mode 100644--- /dev/null +++ "b/4857.jsonl" @@ -0,0 +1,604 @@ +{"seq_id":"70205350691","text":"\"\"\"Write a program that displays the list of exams passed by a student, with their marks.\nThere is a file, classes.txt, which contains the names of all the courses provided in\nthe educational institution (a US college), the content of which will look like this:\nCSC1\nCSC2\nCSC46\nCSC151\nMTH121\n...\nThen, for each course, a file is available (whose name is equal to the course code\nfollowed by .txt) which lists the students who passed the related exam and contains the student identification numbers\n(ID) and grades, such as this, which could be the\nCSC2.txt file:\n11234 A–\n12547 B\n16753 B+\n21886 C\n...\nWrite a program that asks the user for the identification (ID) of a student and\ndisplays the list of exams that that student has passed, with the relative marks\nobtained, as in this example:\nStudent ID 16753\nCSC2 B+\nMTH121 C+\nCHN1 A\nPHY50 A–\n(7.28 M)\"\"\"\n\nFILENAME = \"classes.txt\"\nOSERROR = \"OSError Opening File: \"\n\ndef open_file(filename):\n try:\n with open(filename, \"r\") as file:\n return file.read().splitlines()\n except OSError as problem:\n print(f\"{OSERROR}{problem}\")\n exit(1)\n\ndef invalid(student_id):\n return False\n\ndef ask_for_student_id():\n student_id = input(\"Insert Identification Number: \")\n if invalid(student_id):\n print(\"Invalid Identification Number. Try again\")\n return ask_for_student_id()\n return student_id\n\ndef data_base(filename):\n data = {}\n courses = open_file(filename)\n for course in courses:\n scores = open_file(f\"{course}.txt\")\n course_scores = []\n for score in scores:\n tupl = score.split()\n course_scores.append((tupl[0], tupl[1]))\n data[course] = course_scores\n return data\n\ndef search_id(data, student_id):\n print_string = f\"{student_id}\\n\"\n for course in data:\n for i in data[course]:\n if i[0] == student_id:\n print_string = print_string + f\"{course} \" + i[1] + \"\\n\"\n return print_string\n\ndef main():\n student_id = ask_for_student_id()\n data = data_base(FILENAME)\n print(search_id(data, student_id))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ValentinDeLuca/CS","sub_path":"Exercise6Lab10.py","file_name":"Exercise6Lab10.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"12901570516","text":"def perfect_binary_tree(data):\n middle = len(data)//2\n root = data[middle]\n lft_array = data[:middle]\n if len(lft_array) > 1:\n perfect_binary_tree(lft_array)\n elif len(lft_array) == 1:\n print(lft_array[0])\n rgt_array = data[middle+1:]\n if len(rgt_array) > 1:\n perfect_binary_tree(rgt_array)\n elif len(rgt_array) == 1:\n print(rgt_array[0])\n print(root)\ndata = []\ntry:\n while True:\n tmp = input()\n data.append(tmp)\nexcept EOFError:\n pass\nperfect_binary_tree(data)\n\nimport cProfile\nfrom numpy import random\nn = 10000\na= random.randint(100000000000000000, size=n)\n\ndata1 = []\ndata1.append(a)","repo_name":"Toan211/CS112_AlgorithmAnalyse","sub_path":"pefect_balance_tree.py","file_name":"pefect_balance_tree.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"7668702753","text":"\n## -------------- 建模规划 --------------------\n\n# 1.单个特征构建组合、评价组合\n# 2.单个特征直接建模预测收益/排序,并构建组合、评价组合,评价环节效果\n# 3.单个特征,构建特征工程,再直接建模,构建组合,评价组合,评价环节效果\n# 
4.单特征构建特征工程,测试特征工程每个单特征的效果\n# 5.特征工程后,建模对比单特征与特征工程后效果的差异\n# 6.增加特征,进行特征显式交叉,观察对比交叉后的建模效果\n# 7.特征工程与特征交叉后,用集成模型通过进行切割式特征交叉来实现样本分类\n# 8.DSIN神经网络建模\n# 9.AlphaGo深度学习算法\n# 10.ALphaZero深度强化学习算法\n## -------------------------------------------\n\nimport os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n# from multiprocessing import Pool, Manager, cpu_count\n# import matplotlib.pyplot as plt\nimport utils\nfrom utils import gl_dict,compute_forward_returns,get_clean_factor\nimport tears\nfrom sklearn.model_selection import KFold,StratifiedKFold,cross_validate,cross_val_score,cross_val_predict\nfrom sklearn.ensemble import RandomForestRegressor,RandomForestClassifier,GradientBoostingClassifier\nfrom sklearn import svm,linear_model\nfrom multiprocessing import Pool, Manager, cpu_count\nimport sm\n\n\ndef add_little(_data, ftr_i):\n _data[ftr_i] = _data[ftr_i] + [x/10000000*_data[ftr_i].values[0] if _data[ftr_i].values[0] > 0 else x/10000000 for x in range(0,len(_data))]\n return _data\n\ndef make_factor_different(factor, ftr_i):\n factor = factor.groupby(ftr_i).apply(lambda y:add_little(y, ftr_i))\n return factor\n\ndef get_factor_price(df_ftrs1, ftr):\n df_ftr = df_ftrs1.loc[:, [\"date\", \"asset\", \"factor_price\"] + ftr].dropna(axis=0)\n\n # for ftr_i in ftr:\n # factors = df_ftr.loc[:, [\"date\", \"asset\"] + ftr].groupby(\"date\").apply(lambda x: make_factor_different(x, ftr_i)).set_index([\"date\", \"asset\"])\n\n factors = df_ftr.loc[:, [\"date\", \"asset\"] + ftr].set_index([\"date\", \"asset\"])\n prices = df_ftr.loc[:, [\"date\", \"asset\", \"factor_price\"]].pivot(columns=\"asset\", values=\"factor_price\", index=\"date\")\n return factors, prices\n\n\ndef get_factor_data(factors, prices):\n quantiles = 7\n periods = (1, 3, 5, 10, 20)\n filter_zscore = 20\n cumulative_returns = True\n groupby_labels = None\n groupby = None\n bins = None\n binning_by_group = False\n max_loss = 0.35\n zero_aware = False\n forward_returns = compute_forward_returns(factors, prices, periods, filter_zscore, cumulative_returns)\n\n factor_data = get_clean_factor(factors, forward_returns, groupby=groupby,\n groupby_labels=groupby_labels,\n quantiles=quantiles, bins=bins,\n binning_by_group=binning_by_group,\n max_loss=max_loss, zero_aware=zero_aware)\n return factor_data\n\n\ndef get_one_label(data):\n\n print(\"date:{}\".format(data.date.values[0]))\n for ftr in [\"1D\", \"3D\", \"5D\", \"10D\", \"20D\"]:\n tmp = np.percentile(data[ftr], 85)\n data[ftr + \"_label\"] = [1 if x > tmp else 0 for x in data[ftr]]\n data[ftr] = (data[ftr] - data[ftr].mean()) / data[ftr].std()\n return data\n\ndef get_clsfy_label(factor_data):\n factor_data = factor_data.groupby(\"date\").apply(lambda x: get_one_label(x))\n return factor_data\n\n\n# def make_factor_different(factor):\n# factor = factor.groupby(\"factor_value\").apply(lambda y:add_little(y))\n# return factor\n\n\ndef ftr_test(df_ftrs1, ftr, _dict):\n\n df_ftr = df_ftrs1.loc[:, [\"date\", \"asset\", ftr, \"factor_price\"]].rename(columns={ftr: \"factor_value\"}).dropna(\n axis=0)\n factors = df_ftr.loc[:, [\"date\", \"asset\", \"factor_value\"]].groupby(\"date\").apply(lambda x: make_factor_different(x,ftr)).set_index([\"date\", \"asset\"])\n # factors = df_ftr.loc[:, [\"date\", \"asset\", \"factor_value\"]].set_index([\"date\", \"asset\"])\n prices = df_ftr.loc[:, [\"date\", \"asset\", \"factor_price\"]].pivot(columns=\"asset\", values=\"factor_price\",\n index=\"date\")\n\n\ndef get_rtn(_data):\n try:\n ptl = 
_data.groupby(\"factor_quantile\").apply(lambda x: x[[\"1D\", \"3D\", \"5D\", \"10D\", \"20D\"]].mean())\n except:\n a = 0\n return ptl\n\n\ndef get_nv(_data, cc=\"3D\"):\n nv = np.cumsum(_data.sort_values(by=\"date\")[cc]) # [[\"1D\",\"3D\",\"5D\",\"10D\",\"20D\"]])\n nv = nv.reset_index(drop=True).reset_index(drop=False)\n return nv\n\nimport matplotlib.pyplot as plt\n\n\ndef stddz(_data):\n\n def _stddz(_data1):\n return (_data1 - _data1.mean()) / _data1.std()\n data_sttdz = _data.apply(lambda y: _stddz(y))\n\n return data_sttdz\n\n\ndef plot_factors(df_sttdz_FE_dm_tt, df_ftrs1, ftr):\n\n factors = df_sttdz_FE_dm_tt.loc[:, [ftr]].rename(columns={ftr: \"factor_value\"}).dropna(axis=0)\n factors_price = pd.merge(factors, df_ftrs1.loc[:, [\"date\", \"asset\", \"factor_price\"]].set_index([\"date\", \"asset\"]),\n left_index=True, right_index=True)\n prices = factors_price.loc[:, [\"factor_price\"]].reset_index(drop=False).pivot(columns=\"asset\",\n values=\"factor_price\", index=\"date\")\n\n gl_dict[\"NAME_FTR\"] = ftr\n gl_dict[\"RTN_EXIST\"] = False\n\n path_new = \"./factors_info_eng/\"\n path_ftr = \"./factors_info_eng/\" + gl_dict[\"NAME_FTR\"] + \"/\"\n path_cache = path_ftr + \"DATA_CACHE/\"\n path_plot = path_ftr + \"PLOTS/\"\n path_stat = path_new + \"STAT_FUT/\"\n path_plot_together = path_stat + \"PLOTS_TOGATHER/\"\n for _path in [path_new, path_ftr, path_cache, path_plot, path_stat, path_plot_together]:\n if not os.path.exists(_path):\n os.mkdir(_path)\n\n gl_dict[\"PATH_FTR\"] = path_ftr\n gl_dict[\"PATH_CACHE\"] = path_cache\n gl_dict[\"PATH_PLOT\"] = path_plot\n gl_dict[\"PATH_PLOT_TOGATHER\"] = path_plot_together\n\n values_num = len(factors.factor_value.drop_duplicates())\n if values_num <= 7:\n quantiles = values_num\n else:\n quantiles = 5\n\n periods = (1, 3, 5, 10, 20)\n filter_zscore = 20\n cc = \"3D\"\n cumulative_returns = True\n groupby_labels = None\n groupby = None\n bins = None\n binning_by_group = False\n max_loss = 0.35\n zero_aware = False\n forward_returns = compute_forward_returns(factors, prices, periods, filter_zscore, cumulative_returns)\n\n if values_num <= 7:\n factor_data = pd.merge(forward_returns, factors, left_index=True, right_index=True)\n factor_data[\"factor_quantile\"] = factor_data.factor_value + 1\n else:\n factor_data = get_clean_factor(factors, forward_returns, groupby=groupby,\n groupby_labels=groupby_labels,\n quantiles=quantiles, bins=bins,\n binning_by_group=binning_by_group,\n max_loss=max_loss, zero_aware=zero_aware)\n factor_data.columns = [\"1D\", \"3D\", \"5D\", \"10D\", \"20D\", \"factor_value\", \"factor_quantile\"]\n df_ptl_rtn = factor_data.groupby(\"date\").apply(lambda x: get_rtn(x)).reset_index(drop=False)\n df_nv = df_ptl_rtn.groupby(\"factor_quantile\").apply(lambda x: get_nv(x, cc))\n df_nv = df_nv.reset_index(drop=False)\n df_nv = df_nv.pivot(columns=\"factor_quantile\", index=\"index\", values=cc)\n df_nv[\"dif\" + str(quantiles) + \"_1\"] = df_nv[quantiles] - df_nv[1.0]\n df_nv[\"dif1_\" + str(quantiles)] = df_nv[1.0] - df_nv[quantiles]\n for col in df_nv.columns:\n plt.plot(df_nv[col], label=str(col))\n plt.legend(loc='upper left', prop={'size': 9})\n plt.title(ftr)\n plt.savefig(path_stat + ftr + \"_\" + cc + \".png\")\n plt.close()\n\n df_nv.to_csv(ftr + \".csv\")\n df_nv\n # data = utils.get_clean_factor_and_forward_returns(factors, prices, quantiles=5, periods=(1, 3, 5, 10, 20))\n # tears.create_full_tear_sheet(data)\n\n\nif __name__ == \"__main__\":\n\n\n # 1 数据准备\n # path = 
\"H:\\\\AlphaLens_hyt_20230112\\\\future\\\\\"\n path = \"D:\\\\work\\\\projects\\\\AlphaLens_hyt_20230112\\\\future\\\\\"\n df_ftrs1 = pd.read_feather(path + \"df_ftrs1.feather\")\n df_ftrs1[\"date\"] = df_ftrs1[\"date\"].apply(lambda x: datetime.strptime(str(x), \"%Y-%m-%d\"))\n ftrs1 = df_ftrs1.columns[:-3].tolist()\n\n feat_lst = [\"overnight_mom_20\", \"overnight_mom_200\", \"sharpe_20\", \"rtn_10\", \"sharpe_10\", \"intraday_mom_10\", \"mk_rto_20\", \"rtn2md_10\", \"overnight_mom_10\", \"posi_40\", \"std_200\"]\n # factors, prices = get_factor_price(df_ftrs1, feat_lst)\n # factors.reset_index(drop=False).to_feather(\"factors.feather\")\n # prices.reset_index(drop=False).to_feather(\"prices.feather\")\n #\n # factors = pd.read_feather(\"factors.feather\").set_index([\"date\", \"asset\"])\n # prices = pd.read_feather(\"prices.feather\").set_index([\"date\"])\n # for _i in range(len(feat_lst)):\n # print(_i)\n # ftr_i = feat_lst[_i]\n # factor_data = get_factor_data(factors[ftr_i], prices).reset_index(drop=False)\n # if _i == 0:\n # factors_data = factor_data.rename(columns={\"factor\": ftr_i, \"factor_quantile\": ftr_i + \"__quantile\"})\n # else:\n # tmp = factor_data.rename(columns={\"factor\": ftr_i, \"factor_quantile\": ftr_i + \"__quantile\"}).loc[:, [\"date\", \"asset\", ftr_i, ftr_i + \"__quantile\"]]\n # factors_data = pd.merge(factors_data, tmp, on=[\"date\", \"asset\"], how=\"left\")\n #\n # factors_data = get_clsfy_label(factors_data)\n # factors_data.to_feather(\"feats_data.feather\")\n\n factors_data = pd.read_feather(\"feats_data.feather\")\n\n factors_data_stddz = factors_data[[\"date\", \"asset\"] + feat_lst].groupby(\"date\").apply(lambda x: stddz(x.set_index([\"date\", \"asset\"])))\n factors_data_stddz.index = factors_data_stddz.index.droplevel(0)\n factors_data = factors_data_stddz.reset_index(drop=False)\n\n # from feature_engineer import FeatureEngineer\n # FE = FeatureEngineer()\n # df_method_4th = factors_data[[\"date\", \"asset\"] + feat_lst].groupby(\"date\").apply(lambda x: FE._4th_method(x.set_index([\"date\", \"asset\"]), trans_type=1))\n # df_method_4th.index = df_method_4th.index.droplevel(0)\n #\n # df_FE_dm = factors_data[[\"date\", \"asset\"] + feat_lst].groupby(\"date\").apply(lambda x: FE.get_my_dummies(x.set_index([\"date\", \"asset\"]), cuts=5)).fillna(0)\n # df_FE_dm.index = df_FE_dm.index.droplevel(0)\n #\n # df_sttdz_FE_dm_tt = pd.concat([factors_data.set_index([\"date\", \"asset\"]), df_method_4th, df_FE_dm], axis=1)\n #\n # for _var in [\"1D\",\"3D\",\"5D\",\"10D\",\"20D\"]:\n # df_sttdz_FE_dm_tt[_var] = factors_data.set_index([\"date\",\"asset\"])[_var]\n #\n # df_sttdz_FE_dm_tt.reset_index(drop=False).to_feather(\"df_sttdz_FE_dm_tt.feather\")\n #\n df_sttdz_FE_dm_tt = pd.read_feather(\"df_sttdz_FE_dm_tt.feather\").set_index([\"date\", \"asset\"])\n #\n # # 单进程\n # for ftr in df_sttdz_FE_dm_tt.columns:\n # print(\"ftr:{}\".format(ftr))\n # plot_factors(df_sttdz_FE_dm_tt, df_ftrs1, ftr)\n #\n # # 进程池\n # poo = Pool(cpu_count()-1)\n # for ftr in df_sttdz_FE_dm_tt.columns:\n # print(\"ftr:{}\".format(ftr))\n # try:\n # plot_factors(df_sttdz_FE_dm_tt, df_ftrs1, ftr)\n # except:\n # print(\" --------- sth wrong --------- \")\n # poo.apply_async(plot_factors, (df_sttdz_FE_dm_tt, df_ftrs1, ))\n # poo.close()\n # poo.join()\n\n df_sttdz_FE_dm_tt = df_sttdz_FE_dm_tt.dropna().fillna(0).reset_index().set_index(\"date\")\n ftr = df_sttdz_FE_dm_tt.columns.tolist()[1:-5]\n date_lst = 
df_sttdz_FE_dm_tt.index.get_level_values(0).drop_duplicates().sort_values().tolist()\n total_valid = 2\n train_n = 250\n valid_n = int(train_n / total_valid)\n test_n = 1\n\n days_pred = 5\n label = str(days_pred) + \"D\"\n\n for date_i in range(train_n + valid_n + days_pred + test_n, len(date_lst)-1):\n print(\"date:{}\".format(date_lst[date_i]))\n train_valid_days = date_lst[date_i-train_n-test_n:date_i-test_n-days_pred]\n test_days = date_lst[date_i-test_n:date_i]\n\n data_test = df_sttdz_FE_dm_tt.loc[test_days, :]\n data_train_valid = df_sttdz_FE_dm_tt.loc[train_valid_days, :]\n\n\n\n\n\n\n kfold = KFold(n_splits=valid_n)\n\n model_score_dict = {}\n my_model = RandomForestRegressor(n_estimators=80)\n\n tmp = cross_validate(my_model, data_train_valid[ftr], data_train_valid[label], cv=kfold, scoring=['r2', 'neg_mean_squared_error'], error_score='raise')\n model_score_dict[\"RF\"] = {\"test_r2\": tmp[\"test_r2\"].mean(), \"test_neg_mean_squared_error\": tmp[\"test_neg_mean_squared_error\"].mean()}\n\n my_model = svm.SVR()\n tmp = cross_validate(my_model, data_train_valid[ftr], data_train_valid[label], cv=kfold, scoring=['r2', 'neg_mean_squared_error'], error_score='raise')\n model_score_dict[\"SVR\"] = {\"test_r2\": tmp[\"test_r2\"].mean(), \"test_neg_mean_squared_error\": tmp[\"test_neg_mean_squared_error\"].mean()}\n\n my_model = linear_model.LinearRegression()\n tmp = cross_validate(my_model, data_train_valid[ftr], data_train_valid[label], cv=kfold, scoring=['r2', 'neg_mean_squared_error'], error_score='raise')\n model_score_dict[\"linear\"] = {\"test_r2\": tmp[\"test_r2\"].mean(), \"test_neg_mean_squared_error\": tmp[\"test_neg_mean_squared_error\"].mean()}\n min_impurity_split = None,\n S_kfold = StratifiedKFold(n_splits=valid_n)\n\n\n my_model = RandomForestClassifier(n_estimators=100, criterion='gini', max_depth=None, min_samples_split=2,\n min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='auto',\n max_leaf_nodes=None, min_impurity_decrease=0.0,\n bootstrap=True, oob_score=False, n_jobs=None, random_state=None,\n verbose=0, warm_start=False, class_weight=None, ccp_alpha=0.0,\n max_samples=None)\n\n tmp = cross_validate(my_model, data_train_valid[ftr], data_train_valid[label], cv=S_kfold,\n scoring=['neg_log_loss','roc_auc'], error_score='raise')\n model_score_dict[\"RF_c\"] = {\"neg_log_loss\": tmp[\"test_neg_log_loss\"].mean(), \"roc_auc\": tmp[\"test_roc_auc\"].mean()}\n\n # my_model = svm.SVC()\n # tmp = cross_validate(my_model, data_train_valid[ftr], data_train_valid[label], cv=S_kfold,\n # scoring=['neg_log_loss','roc_auc'], error_score='raise')\n # model_score_dict[\"SVR_C\"] = {\"neg_log_loss\": tmp[\"test_neg_log_loss\"].mean(), \"roc_auc\": tmp[\"test_roc_auc\"].mean()}\n\n my_model = linear_model.LogisticRegression()\n tmp = cross_validate(my_model, data_train_valid[ftr], data_train_valid[label], cv=S_kfold,\n scoring=['neg_log_loss','roc_auc'], error_score='raise')\n model_score_dict[\"Logistic_C\"] = {\"neg_log_loss\": tmp[\"test_neg_log_loss\"].mean(), \"roc_auc\": tmp[\"test_roc_auc\"].mean()}\n\n my_model = GradientBoostingClassifier()\n tmp = cross_validate(my_model, data_train_valid[ftr], data_train_valid[label], cv=S_kfold, scoring=['neg_log_loss', 'roc_auc'], error_score='raise')\n model_score_dict[\"GB_C\"] = {\"neg_log_loss\": tmp[\"test_neg_log_loss\"].mean(), \"roc_auc\": tmp[\"test_roc_auc\"].mean()}\n\n print(model_score_dict)\n\n # 1.1 样本构建:N天收益和排序,0-1标记,训练、验证、测试数据集划分和利用\n\n\n # 
1.单个特征构建组合、评价组合\n","repo_name":"huangyunteng/project_1","sub_path":"源码/project_3-modeling_simple/future_engineering_and_modeling.py","file_name":"future_engineering_and_modeling.py","file_ext":"py","file_size_in_byte":15626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"8059664750","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cacao', '0013_auto_20151202_1501'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='section',\n options={'ordering': ['guide__number', 'peso'], 'verbose_name': 'Seccion', 'verbose_name_plural': 'Secciones'},\n ),\n ]\n","repo_name":"CacaoMovil/guia-de-cafe-django","sub_path":"cacao_app/cacao/migrations/0014_auto_20160429_0808.py","file_name":"0014_auto_20160429_0808.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"9941723250","text":"# import openpyxl\nimport pypinyin\nimport codecs\n\n\ndef temp1(word):\n if not word.isalpha():\n return None\n if len(word) <= 8:\n return word\n\n\ndef eff_large(word):\n if word.isalpha():\n return word\n return False\n\n\nalphabet = [chr(i) for i in range(97, 123)]\nalphabet.extend([chr(i) for i in range(65, 91)])\ndef chinese(word):\n word = ''.join([w for w in word if not w.isdigit()])\n word = ''.join([w for w in word if w not in alphabet])\n word = word.replace(\" \", \"\")\n if len(word) == 1:\n return False\n return word\n\n\ndef to_pinyin(word):\n pinyin = pypinyin.lazy_pinyin(word, style=pypinyin.Style.TONE3)\n for i in pinyin:\n if i.isalpha():\n return False\n pinyin = ','.join(pinyin)\n return f\"{pinyin}|{word}\"\n\n\ndef to_lazy_pinyin(word):\n pinyin = pypinyin.lazy_pinyin(word)\n return ''.join(pinyin)\n\n\ndef get_word_list(filename):\n with codecs.open(filename, 'r', encoding='utf-8') as f:\n word_list = f.read().split()\n return word_list\n\n\ndef remove_repeat(filename):\n word_list = get_word_list(filename)\n word_list = list(set(word_list))\n word_list.sort()\n with codecs.open(filename, 'w', encoding=\"utf-8\") as fw:\n for word in word_list:\n fw.write(f\"{word}\\n\")\n\n\ndef transfer(filename, new_name=None, ext_name=None):\n word_list = get_word_list(filename)\n\n new = filename.rsplit('.', 1)\n new.insert(1, ext_name or 'transed')\n new_file = new_name or '.'.join(new)\n with codecs.open(new_file, 'w', encoding=\"utf-8\") as fw:\n for word in word_list:\n word = to_pinyin(word)\n if word:\n fw.write(f\"{word}\\n\")\n print(word)\n remove_repeat(new_file)\n\n\ndef tran_from_exel(filename):\n wb = openpyxl.load_workbook(filename)\n ws = wb.active\n for row in range(1, ws.max_row + 1):\n print(ws.cell(row, 1).value)\n\n\n# tran_from_exel(\"chinese_words.xlsx\")\ntransfer(\"chinese_words.txt\", ext_name='pinyin')\n# remove_repeat(\"chinese_words.pinyin.no.tune.txt\")\n","repo_name":"371114769/Python","sub_path":"PasswordGenerator/raw_wordfiles/WordsTransfer.py","file_name":"WordsTransfer.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"72548306531","text":"class Typeahead:\n \"\"\"\n @param: dict: A dictionary of words dict\n \"\"\"\n\n def __init__(self, dict):\n typeahead = {}\n for word in dict:\n for i in range(len(word)):\n for j in range(i + 1, len(word) + 1):\n key = word[i:j]\n 
if key not in typeahead:\n typeahead[key] = set()\n typeahead[key].add(word)\n self.typeahead = typeahead\n\n \"\"\"\n @param: str: a string\n @return: a list of words\n \"\"\"\n\n def search(self, str):\n if str not in self.typeahead:\n return []\n\n return list(self.typeahead[str])\n\n\n\"\"\"\narray 去重 加速\n\"\"\"\nclass Typeahead:\n \"\"\"\n @param: dict: A dictionary of words dict\n \"\"\"\n def __init__(self, dict):\n # do initialize if necessary\n self.mp = {}\n for s in dict:\n l = len(s)\n for i in range(l):\n for j in range(i + 1, l + 1):\n tmp = s[i:j]\n if tmp not in self.mp:\n self.mp[tmp] = [s]\n elif self.mp[tmp][-1] != s:\n self.mp[tmp].append(s)\n\n # @param word: a string\n # @return a list of words\n def search(self, word):\n # write your code here\n if word not in self.mp:\n return []\n else:\n return self.mp[word]","repo_name":"Julian-Chu/leetcode_python","sub_path":"lintcode/lintcode231.py","file_name":"lintcode231.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"31680328710","text":"from docx import Document\nfrom bing_image_downloader import downloader\nimport os\nfrom PIL import Image\nfrom docx.shared import Inches\nimport nltk\nimport bs4 as bs\nimport urllib.request\nimport re\nimport heapq\nimport requests\nfrom bs4 import BeautifulSoup\nfrom nltk import tokenize\nfrom operator import itemgetter\nimport math\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize \n\n\ndocument = Document()\n\nparagraph_format = document.styles['Normal'].paragraph_format\nparagraph_format.space_before = 16\n\n\ndocument.add_heading('Welcome to your English activity worksheet')\n\ndocument.add_heading('Activity 1', \n\tlevel = 3)\n\nparagraph = document.add_paragraph('Talk about each of the images (What, where, who, how, why). Use your imagination.')\n\n################# - Generate your images\n\nstring = input(\"Search query: \")\n\nnumber_photos = input('How many photos? ')\n\nfolder_name = input('Choose your folder name: ')\n\ndownloader.download(string, limit= int(number_photos), \n output_dir=folder_name, adult_filter_off=True, \n force_replace=False, timeout=60, verbose=True)\n\nprint(\"Photos saved in\", folder_name,\"/\",string)\n\nextensions = ('jpeg', 'png')\n\ndirectory = \"{}/{}\".format(folder_name, string)\n\nfiles_in_directory = os.listdir(directory)\n\nfiltered_files = [file for file in files_in_directory if file.endswith(extensions)]\n\nfor file in filtered_files:\n path_to_file = os.path.join(directory, file)\n os.remove(path_to_file)\n\n\nfiles_in_directory_filtered = os.listdir(directory)\n\nfor file in files_in_directory_filtered:\n\tdocument.add_picture(directory + '/' + file, width = Inches(6.0))\n\tprint(\n\n )\n# img = Image.open(directory + '/' + file)\n# img.show()\n\n\n######################################## - text\n\ndocument.add_heading('Activity 2', \n\tlevel = 3)\n\nwiki_article = input(\"Choose your wikipedia article:\")\n\nurl = wiki_article\nresult = requests.get(url)\ndoc = BeautifulSoup(result.text, \"html.parser\")\n\ntitle = doc.find('span', class_ = 'mw-headline')\nsummary_text = title.find_previous('p').find_previous_siblings('p')\n\ntext = \"\"\nfor x in summary_text:\n x = str(x.get_text())\n x = re.sub(r'\\[[0-9]*\\]',' ',x)\n text += x\n\n\n# Generate verbs / nouns / adjectives\n\ntypeofwords = input('What vocabulary would you like to practice? 
')\n\ntext_token = word_tokenize(text)\n\ntext_token = nltk.pos_tag(text_token)\n\ndocument.add_heading('Activity 2.1: ', level = 4)\n\ndocument.add_paragraph('''Review the below vocabulary. Write some example\nphrases. Think about how it can be used in the different tenses (past, present, future, present perfect simple),\nwith affirmations, negations, and questions, and for different subjects. \n ''')\n\n\nif typeofwords == 'verbs':\n    verbs = [item for item in text_token if item[1].startswith('VBN')]\n    verb_list = \"\"\n    for verb in verbs:\n        verb_list += verb[0] + \", \"\n    verb_list = verb_list.replace(\"  \", \" \").split(\",\") \n    document.add_paragraph(verb_list[:10])\nelif typeofwords == 'nouns':\n    Nouns = [item for item in text_token if item[1].startswith('N')]\n    Noun_list = \"\"\n    for noun in Nouns:\n        Noun_list += noun[0] + \", \"\n    Noun_list = Noun_list.replace(\"  \", \" \").split(\",\") \n    document.add_paragraph(Noun_list[:5])\nelif typeofwords == 'adjectives':\n    adjectives = [item for item in text_token if item[1].startswith('J')]\n    adjectives_list = \"\"\n    for adjective in adjectives:\n        adjectives_list += adjective[0] + \", \"\n    adjectives_list = adjectives_list.replace(\"  \", \" \").split(\",\") \n    document.add_paragraph(adjectives_list[:5])\n\n\ndocument.add_heading('Activity 2.2: ', level = 4)\n\ndocument.add_paragraph('Read the below article. After each paragraph tell yourself or write a short summary of what you have read.')\n\ndocument.add_paragraph(text)\n\ndocument.save('word_doc_{}.docx'.format(string))\n\n# titles = []\n# for x in doc.find_all('span', class_ = 'mw-headline'):\n#     if x.string == 'External links':\n#         continue\n#     if x.string == 'References':\n#         continue\n#     if x.string == 'See also':\n#         continue\n#     if x.string == 'Further reading':\n#         continue\n#     titles.append(x.string)\n\n# for title in titles:\n#     document.add_heading(title, level =4)\n    \n#     paragraphs = title.find_all_next('p', limit = 2)\n#     for p in paragraphs: \n#         text = str(p.get_text())\n#         text = re.sub(r'\\[[0-9]*\\]',' ',text)\n#         document.add_paragraph(text)\n#     print(\n\n# \t)\n\n\n\n\n\n\n\n\n","repo_name":"JamesAttwood1910/Wiki_Language_App","sub_path":"main_.py","file_name":"main_.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"36520540021","text":"from django.urls import path\nfrom . 
import views\n\n\nurlpatterns = [\n path('', views.index),\n path('home/', views.home, name='kopu'),\n path('about/', views.about),\n path('contact/', views.contact),\n path('display/', views.display),\n path('emp/', views.emp),\n path('emp-show/', views.emp_show),\n path('emp-edit/', views.emp_edit), \n path('emp-update/', views.emp_update), \n path('emp-delete/', views.emp_destroy),\n path('lyrics/', views.lyrics),\n path('lyrics-show/', views.lyrics_show),\n path('lyrics-edit/', views.lyrics_edit),\n path('lyrics-update/', views.lyrics_update),\n path('lyrics-delete/', views.lyrics_destroy),\n]","repo_name":"coders-for-org/django_test_org","sub_path":"aboutus/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"35161132297","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nfrom matplotlib.patches import Rectangle\nimport pandas as pd\n\nfrom utils import HOUSES, COLOR\n\ndf = pd.read_csv('../../assets/dataset_train.csv', index_col=0)\nfeatures = df.columns[5:].to_list()\ndf = df.drop(df.iloc[:, 1:5], axis=1)\n\ndf_houses = {\n \"Gryffindor\": df[df[\"Hogwarts House\"] == \"Gryffindor\"],\n \"Slytherin\": df[df[\"Hogwarts House\"] == \"Slytherin\"],\n \"Ravenclaw\": df[df[\"Hogwarts House\"] == \"Ravenclaw\"],\n \"Hufflepuff\": df[df[\"Hogwarts House\"] == \"Hufflepuff\"]\n}\n\nfig = plt.figure(figsize=(12, 9))\naxs = fig.subplots(nrows=3, ncols=5)\nfor i, feature in enumerate(features):\n for h in HOUSES:\n axs[i // 5][i % 5].hist(df_houses[h][feature].dropna(), bins=20, color=COLOR[h], label=h, alpha=0.5)\n axs[i // 5][i % 5].set_title(feature)\n axs[i // 5][i % 5].set_xlabel(\"Value\")\n axs[i // 5][i % 5].set_ylabel(\"Frequency\")\n\n# Remove empty subplots\naxs[2, 3].axis('off')\naxs[2, 4].axis('off')\n\n# Put margins between subplots\nplt.subplots_adjust(left=0.1, bottom=0.05, right=0.9, top=0.95)\nplt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n# Add legend\nhandles = [Rectangle((0,0),3,3,color=c,ec=\"k\") for c in COLOR.values()]\nplt.legend(handles, HOUSES)\n\nplt.show()","repo_name":"mgkgng/dslr","sub_path":"src/visualization/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"34188266896","text":"#!/usr/bin/python3\n\nfrom gensim.models import Word2Vec\nfrom progressbar import ProgressBar\nfrom time import sleep\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nimport os\n\ntrain_txt = open(\"../Data_1/train.txt\",\"r\")\ntest_txt = open(\"../Data_1/test.txt\",\"r\")\ntrain_labels_txt = open(\"../Data_1/labels/train_targets.txt\")\ntest_labels_txt = open(\"../Data_1/labels/test_targets.txt\")\n\ntrain_list = train_txt.readlines()\ntest_list = test_txt.readlines()\ntrain_labels_list = train_labels_txt.readlines()\ntest_labels_list = test_labels_txt.readlines()\n\ntrain = []\ntest = []\ntraining_data = []\ntesting_data = []\ntraining_labels = []\ntesting_labels = [] \n\nfor sentence in train_list:\n\tbuff = []\n\tx = sentence.split()\n\tfor i in x:\n\t\tbuff.append(i)\n\ttrain.append(buff)\n\nfor sentence in test_list:\n\tbuff = []\n\tx = sentence.split()\n\tfor i in x:\n\t\tbuff.append(i)\n\ttest.append(buff)\n\nmax_len = 0\npadding = 'Khrystarlite'\n\nfor i in train:\n\tif(len(i) > max_len):\n\t\tmax_len = len(i)\n\nfor i in test:\n\tif(len(i) > max_len):\n\t\tmax_len = 
len(i)\n\nfor i in train:\n\twhile(len(i) < max_len):\n\t\ti.append(padding)\n\nfor i in test:\n\twhile(len(i) < max_len):\n\t\ti.append(padding)\n\n\ntraining_vec = Word2Vec(train, workers=4, min_count=1)\ntesting_vec = Word2Vec(test, workers=4, min_count=1)\n\nfor word in train:\n\ttraining_data.append(training_vec[word])\n\nfor word in test:\n\ttesting_data.append(testing_vec[word])\n\nfor label in train_labels_list:\n\ttraining_labels.append(int(label))\n\nfor label in test_labels_list:\n\ttesting_labels.append(int(label))\n\n\n\nfor i in range(80):\n\tdf = training_data[i]\n\tnp.savetxt('../Data_1/vector/train/{0}_fpVectWordRep_trainDAT.txt'.format(i+1), df, delimiter='\\t')\n\nfor i in range(20):\n\tdf = testing_data[i]\n\tnp.savetxt('../Data_1/vector/test/{0}_fpVectWordRep_testDAT.txt'.format(i+1), df, delimiter='\\t')\n\n\n\n\n\n","repo_name":"tchua0927/XorNN","sub_path":"extractors/fpVectWordRep.py","file_name":"fpVectWordRep.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"70703966691","text":"# from PyQt5.QtCore import QTimer\n# from random import randint\n#\n# from PyQt5.QtCore import Qt\n# from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton\n# # global timer\n# global button\n# # def timer_tick(button):\n# #\n# #     x=button.x()\n# #     y=button.y()\n# #     button.move(x+5,y+5)\n# class ArtemiyLuchshiy(QMainWindow):\n#     def keyPressEvent(self, event):\n#         global button\n#         if event.key() == Qt.Key_Space:\n#             button = QPushButton()\n#             button.setFixedSize(100,100)\n#             window.layout().addWidget(button)\n#             button.move(randint(0,1000),randint(0,1000))\n#             # self.close()\n\n\n\n# def clicked(button):\n#     global timer\n#     timer=QTimer()\n#     timer.setInterval(10)\n#     timer.timeout.connect(lambda:timer_tick(button))\n#     timer.start()\ndef fibbonachi(a):\n    if a in (1, 2):\n        return 1\n    return fibbonachi(a-1) + fibbonachi(a-2)\n\ndef factorial(a):\n    if a==0:\n        return 1\n    return factorial(a-1) * a\n\n\n# sum the forward (wrap-around, modulo m) steps needed to move from each position to the next one read\nm,n=input().split()\nm=int(m)\nn=int(n)\nlist= input().split()\nsumt=0\ncurr=1\nsumt1=0\nfor next in list:\n    next = int(next)\n    if next>curr:\n        res=next-curr\n        sumt+=res\n    elif next ==curr:\n        res=0\n        sumt+=res\n    else:\n        res=m-(curr-next)\n        sumt += res\n    curr=next\nprint(sumt)\n# app=QApplication([])\n#\n# window=ArtemiyLuchshiy()\n# window.show()\n# window.setFixedSize(1000,1000)\n\n\n# button.setText('Press me')\n# button.clicked.connect(lambda:(button))\nprint(fibbonachi(35))\nprint(factorial(35))","repo_name":"Lureterio1922/server","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"33086453578","text":"\"\"\"uWSGI related tools.\"\"\"\n\nimport os\nimport pwd\nimport stat\n\nfrom .. import package\nfrom .. import system\nfrom .. import utils\n\nfrom . 
import base\n\n\nclass Uwsgi(base.Installer):\n \"\"\"uWSGI installer.\"\"\"\n\n appname = \"uwsgi\"\n packages = {\n \"deb\": [\"uwsgi\", \"uwsgi-plugin-python3\"],\n \"rpm\": [\"uwsgi\", \"uwsgi-plugin-python36\"],\n }\n\n def get_socket_path(self, app):\n \"\"\"Return socket path.\"\"\"\n if package.backend.FORMAT == \"deb\":\n return \"/run/uwsgi/app/{}_instance/socket\".format(app)\n return \"/run/uwsgi/{}_instance.sock\".format(app)\n\n def get_template_context(self, app):\n \"\"\"Additionnal variables.\"\"\"\n context = super(Uwsgi, self).get_template_context()\n if package.backend.FORMAT == \"deb\":\n uwsgi_plugin = \"python3\"\n else:\n uwsgi_plugin = \"python36\"\n context.update({\n \"app_user\": self.config.get(app, \"user\"),\n \"app_venv_path\": self.config.get(app, \"venv_path\"),\n \"app_instance_path\": (\n self.config.get(app, \"instance_path\")),\n \"uwsgi_socket_path\": self.get_socket_path(app),\n \"uwsgi_plugin\": uwsgi_plugin,\n })\n return context\n\n def get_config_dir(self):\n \"\"\"Return appropriate configuration directory.\"\"\"\n if package.backend.FORMAT == \"deb\":\n return os.path.join(self.config_dir, \"apps-available\")\n return \"{}.d\".format(self.config_dir)\n\n def _enable_config_debian(self, dst):\n \"\"\"Enable config file.\"\"\"\n link = os.path.join(\n self.config_dir, \"apps-enabled\", os.path.basename(dst))\n if os.path.exists(link):\n return\n os.symlink(dst, link)\n\n def _setup_config(self, app):\n \"\"\"Common setup code.\"\"\"\n context = self.get_template_context(app)\n src = self.get_file_path(\"{}.ini.tpl\".format(app))\n dst = os.path.join(\n self.get_config_dir(), \"{}_instance.ini\".format(app))\n utils.copy_from_template(src, dst, context)\n return dst\n\n def _setup_modoboa_config(self):\n \"\"\"Custom modoboa configuration.\"\"\"\n dst = self._setup_config(\"modoboa\")\n if package.backend.FORMAT == \"deb\":\n self._enable_config_debian(dst)\n else:\n system.add_user_to_group(\n \"uwsgi\", self.config.get(\"modoboa\", \"user\"))\n utils.exec_cmd(\"chmod -R g+w {}/media\".format(\n self.config.get(\"modoboa\", \"instance_path\")))\n utils.exec_cmd(\"chmod -R g+w {}/pdfcredentials\".format(\n self.config.get(\"modoboa\", \"home_dir\")))\n pattern = (\n \"s/emperor-tyrant = true/emperor-tyrant = false/\")\n utils.exec_cmd(\n \"perl -pi -e '{}' /etc/uwsgi.ini\".format(pattern))\n\n def _setup_automx_config(self):\n \"\"\"Custom automx configuration.\"\"\"\n dst = self._setup_config(\"automx\")\n if package.backend.FORMAT == \"deb\":\n self._enable_config_debian(dst)\n else:\n system.add_user_to_group(\n \"uwsgi\", self.config.get(\"automx\", \"user\"))\n pattern = (\n \"s/emperor-tyrant = true/emperor-tyrant = false/\")\n utils.exec_cmd(\n \"perl -pi -e '{}' /etc/uwsgi.ini\".format(pattern))\n\n def post_run(self):\n \"\"\"Additionnal tasks.\"\"\"\n self._setup_modoboa_config()\n if self.config.getboolean(\"automx\", \"enabled\"):\n self._setup_automx_config()\n\n def restart_daemon(self):\n \"\"\"Restart daemon process.\"\"\"\n # Temp. 
fix for CentOS\n if utils.dist_name().startswith(\"centos\"):\n pw = pwd.getpwnam(\"uwsgi\")\n utils.mkdir(\n \"/run/uwsgi\",\n stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |\n stat.S_IROTH | stat.S_IXOTH,\n pw[2], pw[3]\n )\n code, output = utils.exec_cmd(\"service uwsgi status\")\n action = \"start\" if code else \"restart\"\n utils.exec_cmd(\"service uwsgi {}\".format(action))\n system.enable_service(self.get_daemon_name())\n","repo_name":"modoboa/modoboa-installer","sub_path":"modoboa_installer/scripts/uwsgi.py","file_name":"uwsgi.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","stars":233,"dataset":"github-code","pt":"89"} +{"seq_id":"43433049165","text":"from enum import Enum\n\n\nclass CurrencyTypeEnum(str, Enum):\n USD = \"USD\"\n RUB = \"RUB\"\n\n\nclass SubscriptionPlanStatusEnum(str, Enum):\n Active = \"active\"\n Archived = \"archived\"\n\n\nclass SubscriptionStatus(str, Enum):\n Active = \"active\"\n Expired = \"expired\"\n Cancelled = \"cancelled\"\n\n\nclass TransactionStatus(str, Enum):\n Processing = \"processing\"\n Paid = \"paid\"\n Declined = \"declined\"\n\n\nclass RefundStatus(str, Enum):\n Processing = \"processing\"\n Approved = \"approved\"\n Declined = \"declined\"\n","repo_name":"netshy/Yookassa-Billing","sub_path":"subscription_api/src/db/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"34868610856","text":"try:\n from collections.abc import MutableSequence\nexcept ImportError:\n # Required for python versions < 3.9\n from collections import MutableSequence\n\n\nclass Line(object):\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\n def __repr__(self):\n return 'Line(start=%s, end=%s)' % (self.start, self.end)\n\n def __eq__(self, other):\n if not isinstance(other, Line):\n return NotImplemented\n return self.start == other.start and self.end == other.end\n\n def __ne__(self, other):\n if not isinstance(other, Line):\n return NotImplemented\n return not self == other\n\n def __getitem__(self, item):\n return self.bpoints()[item]\n\n def __len__(self):\n return 2\n\n # def joins_smoothly_with(self, previous, wrt_parameterization=False):\n # \"\"\"Checks if this segment joins smoothly with previous segment. By\n # default, this only checks that this segment starts moving (at t=0) in\n # the same direction (and from the same positive) as previous stopped\n # moving (at t=1). 
To check if the tangent magnitudes also match, set\n # wrt_parameterization=True.\"\"\"\n # if wrt_parameterization:\n # return self.start == previous.end and np.isclose(\n # self.derivative(0), previous.derivative(1))\n # else:\n # return self.start == previous.end and np.isclose(\n # self.unit_tangent(0), previous.unit_tangent(1))\n\n def point(self, t):\n \"\"\"returns the coordinates of the Bezier curve evaluated at t.\"\"\"\n distance = self.end - self.start\n return self.start + distance*t\n\n # def length(self, t0=0, t1=1, error=None, min_depth=None):\n # \"\"\"returns the length of the line segment between t0 and t1.\"\"\"\n # return abs(self.end - self.start)*(t1-t0)\n\n # def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,\n # error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):\n # \"\"\"Returns a float, t, such that self.length(0, t) is approximately s.\n # See the inv_arclength() docstring for more details.\"\"\"\n # return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,\n # min_depth=min_depth)\n\n def bpoints(self):\n \"\"\"returns the Bezier control points of the segment.\"\"\"\n return self.start, self.end\n\n def poly(self, return_coeffs=False):\n \"\"\"returns the line as a Polynomial object.\"\"\"\n p = self.bpoints()\n coeffs = ([p[1] - p[0], p[0]])\n # if return_coeffs:\n # return coeffs\n # else:\n # return np.poly1d(coeffs)\n return coeffs\n\n # def derivative(self, t=None, n=1):\n # \"\"\"returns the nth derivative of the segment at t.\"\"\"\n # assert self.end != self.start\n # if n == 1:\n # return self.end - self.start\n # elif n > 1:\n # return 0\n # else:\n # raise ValueError(\"n should be a positive integer.\")\n\n # def unit_tangent(self, t=None):\n # \"\"\"returns the unit tangent of the segment at t.\"\"\"\n # assert self.end != self.start\n # dseg = self.end - self.start\n # return dseg/abs(dseg)\n\n # def normal(self, t=None):\n # \"\"\"returns the (right hand rule) unit normal vector to self at t.\"\"\"\n # return -1j*self.unit_tangent(t)\n\n # def curvature(self, t):\n # \"\"\"returns the curvature of the line, which is always zero.\"\"\"\n # return 0\n\n # # def icurvature(self, kappa):\n # # \"\"\"returns a list of t-values such that 0 <= t<= 1 and\n # # seg.curvature(t) = kappa.\"\"\"\n # # if kappa:\n # # raise ValueError(\"The .icurvature() method for Line elements will \"\n # # \"return an empty list if kappa is nonzero and \"\n # # \"will raise this exception when kappa is zero as \"\n # # \"this is true at every point on the line.\")\n # # return []\n\n # def reversed(self):\n # \"\"\"returns a copy of the Line object with its orientation reversed.\"\"\"\n # return Line(self.end, self.start)\n\n # def intersect(self, other_seg, tol=None):\n # \"\"\"Finds the intersections of two segments.\n # returns a list of tuples (t1, t2) such that\n # self.point(t1) == other_seg.point(t2).\n # Note: This will fail if the two segments coincide for more than a\n # finite collection of points.\n # tol is not used.\"\"\"\n # if isinstance(other_seg, Line):\n # assert other_seg.end != other_seg.start and self.end != self.start\n # assert self != other_seg\n # # Solve the system [p1-p0, q1-q0]*[t1, t2]^T = q0 - p0\n # # where self == Line(p0, p1) and other_seg == Line(q0, q1)\n # a = (self.start.real, self.end.real)\n # b = (self.start.imag, self.end.imag)\n # c = (other_seg.start.real, other_seg.end.real)\n # d = (other_seg.start.imag, other_seg.end.imag)\n # denom = ((a[1] - a[0])*(d[0] - d[1]) -\n # (b[1] - b[0])*(c[0] - c[1]))\n # 
if np.isclose(denom, 0):\n # return []\n # t1 = (c[0]*(b[0] - d[1]) -\n # c[1]*(b[0] - d[0]) -\n # a[0]*(d[0] - d[1]))/denom\n # t2 = -(a[1]*(b[0] - d[0]) -\n # a[0]*(b[1] - d[0]) -\n # c[0]*(b[0] - b[1]))/denom\n # if 0 <= t1 <= 1 and 0 <= t2 <= 1:\n # return [(t1, t2)]\n # return []\n # elif isinstance(other_seg, QuadraticBezier):\n # t2t1s = bezier_by_line_intersections(other_seg, self)\n # return [(t1, t2) for t2, t1 in t2t1s]\n # elif isinstance(other_seg, CubicBezier):\n # t2t1s = bezier_by_line_intersections(other_seg, self)\n # return [(t1, t2) for t2, t1 in t2t1s]\n # elif isinstance(other_seg, Arc):\n # t2t1s = other_seg.intersect(self)\n # return [(t1, t2) for t2, t1 in t2t1s]\n # elif isinstance(other_seg, Path):\n # raise TypeError(\n # \"other_seg must be a path segment, not a Path object, use \"\n # \"Path.intersect().\")\n # else:\n # raise TypeError(\"other_seg must be a path segment.\")\n\n # def bbox(self):\n # \"\"\"returns the bounding box for the segment in the form\n # (xmin, xmax, ymin, ymax).\"\"\"\n # xmin = min(self.start.real, self.end.real)\n # xmax = max(self.start.real, self.end.real)\n # ymin = min(self.start.imag, self.end.imag)\n # ymax = max(self.start.imag, self.end.imag)\n # return xmin, xmax, ymin, ymax\n\n # def cropped(self, t0, t1):\n # \"\"\"returns a cropped copy of this segment which starts at\n # self.point(t0) and ends at self.point(t1).\"\"\"\n # return Line(self.point(t0), self.point(t1))\n\n # def split(self, t):\n # \"\"\"returns two segments, whose union is this segment and which join at\n # self.point(t).\"\"\"\n # pt = self.point(t)\n # return Line(self.start, pt), Line(pt, self.end)\n\n # def radialrange(self, origin, return_all_global_extrema=False):\n # \"\"\"returns the tuples (d_min, t_min) and (d_max, t_max) which minimize\n # and maximize, respectively, the distance d = |self.point(t)-origin|.\"\"\"\n # return bezier_radialrange(self, origin,\n # return_all_global_extrema=return_all_global_extrema)\n\n # def rotated(self, degs, origin=None):\n # \"\"\"Returns a copy of self rotated by `degs` degrees (CCW) around the\n # point `origin` (a complex number). By default `origin` is either\n # `self.point(0.5)`, or in the case that self is an Arc object,\n # `origin` defaults to `self.center`.\"\"\"\n # return rotate(self, degs, origin=origin)\n\n # def translated(self, z0):\n # \"\"\"Returns a copy of self shifted by the complex quantity `z0` such\n # that self.translated(z0).point(t) = self.point(t) + z0 for any t.\"\"\"\n # return translate(self, z0)\n\n # def scaled(self, sx, sy=None, origin=0j):\n # \"\"\"Scale transform. 
See `scale` function for further explanation.\"\"\"\n # return scale(self, sx=sx, sy=sy, origin=origin)\n\n\nclass CubicBezier(object):\n # For compatibility with old pickle files.\n # _length_info = {'length': None, 'bpoints': None, 'error': None,\n # 'min_depth': None}\n\n def __init__(self, start, control1, control2, end):\n self.start = start\n self.control1 = control1\n self.control2 = control2\n self.end = end\n\n # used to know if self._length needs to be updated\n # self._length_info = {'length': None, 'bpoints': None, 'error': None,\n # 'min_depth': None}\n\n def __repr__(self):\n return 'CubicBezier(start=%s, control1=%s, control2=%s, end=%s)' % (\n self.start, self.control1, self.control2, self.end)\n\n def __eq__(self, other):\n if not isinstance(other, CubicBezier):\n return NotImplemented\n return self.start == other.start and self.end == other.end \\\n and self.control1 == other.control1 \\\n and self.control2 == other.control2\n\n def __ne__(self, other):\n if not isinstance(other, CubicBezier):\n return NotImplemented\n return not self == other\n\n def __getitem__(self, item):\n return self.bpoints()[item]\n\n def __len__(self):\n return 4\n\n # def is_smooth_from(self, previous, warning_on=True):\n # \"\"\"[Warning: The name of this method is somewhat misleading (yet kept\n # for compatibility with scripts created using svg.path 2.0). This\n # method is meant only for d string creation and should not be used to\n # check for kinks. To check a segment for differentiability, use the\n # joins_smoothly_with() method instead.]\"\"\"\n # if warning_on:\n # warn(_is_smooth_from_warning)\n # if isinstance(previous, CubicBezier):\n # return (self.start == previous.end and\n # (self.control1 - self.start) == (\n # previous.end - previous.control2))\n # else:\n # return self.control1 == self.start\n\n # def joins_smoothly_with(self, previous, wrt_parameterization=False):\n # \"\"\"Checks if this segment joins smoothly with previous segment. By\n # default, this only checks that this segment starts moving (at t=0) in\n # the same direction (and from the same positive) as previous stopped\n # moving (at t=1). 
To check if the tangent magnitudes also match, set\n # wrt_parameterization=True.\"\"\"\n # if wrt_parameterization:\n # return self.start == previous.end and np.isclose(\n # self.derivative(0), previous.derivative(1))\n # else:\n # return self.start == previous.end and np.isclose(\n # self.unit_tangent(0), previous.unit_tangent(1))\n\n def point(self, t):\n \"\"\"Evaluate the cubic Bezier curve at t using Horner's rule.\"\"\"\n # algebraically equivalent to\n # P0*(1-t)**3 + 3*P1*t*(1-t)**2 + 3*P2*(1-t)*t**2 + P3*t**3\n # for (P0, P1, P2, P3) = self.bpoints()\n return self.start + t*(\n 3*(self.control1 - self.start) + t*(\n 3*(self.start + self.control2) - 6*self.control1 + t*(\n -self.start + 3*(self.control1 - self.control2) + self.end\n )))\n\n # def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):\n # \"\"\"Calculate the length of the path up to a certain position\"\"\"\n # if t0 == 0 and t1 == 1:\n # if self._length_info['bpoints'] == self.bpoints() \\\n # and self._length_info['error'] >= error \\\n # and self._length_info['min_depth'] >= min_depth:\n # return self._length_info['length']\n\n # # using scipy.integrate.quad is quick\n # if _quad_available:\n # s = quad(lambda tau: abs(self.derivative(tau)), t0, t1,\n # epsabs=error, limit=1000)[0]\n # else:\n # s = segment_length(self, t0, t1, self.point(t0), self.point(t1),\n # error, min_depth, 0)\n\n # if t0 == 0 and t1 == 1:\n # self._length_info['length'] = s\n # self._length_info['bpoints'] = self.bpoints()\n # self._length_info['error'] = error\n # self._length_info['min_depth'] = min_depth\n # return self._length_info['length']\n # else:\n # return s\n\n # def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,\n # error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):\n # \"\"\"Returns a float, t, such that self.length(0, t) is approximately s.\n # See the inv_arclength() docstring for more details.\"\"\"\n # return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,\n # min_depth=min_depth)\n\n def bpoints(self):\n \"\"\"returns the Bezier control points of the segment.\"\"\"\n return self.start, self.control1, self.control2, self.end\n\n def poly(self, return_coeffs=False):\n \"\"\"Returns a the cubic as a Polynomial object.\"\"\"\n p = self.bpoints()\n coeffs = (-p[0] + 3*(p[1] - p[2]) + p[3],\n 3*(p[0] - 2*p[1] + p[2]),\n 3*(-p[0] + p[1]),\n p[0])\n # if return_coeffs:\n # return coeffs\n # else:\n # return np.poly1d(coeffs)\n return coeffs\n\n # def derivative(self, t, n=1):\n # \"\"\"returns the nth derivative of the segment at t.\n # Note: Bezier curves can have points where their derivative vanishes.\n # If you are interested in the tangent direction, use the unit_tangent()\n # method instead.\"\"\"\n # p = self.bpoints()\n # if n == 1:\n # return 3*(p[1] - p[0])*(1 - t)**2 + 6*(p[2] - p[1])*(1 - t)*t + 3*(\n # p[3] - p[2])*t**2\n # elif n == 2:\n # return 6*(\n # (1 - t)*(p[2] - 2*p[1] + p[0]) + t*(p[3] - 2*p[2] + p[1]))\n # elif n == 3:\n # return 6*(p[3] - 3*(p[2] - p[1]) - p[0])\n # elif n > 3:\n # return 0\n # else:\n # raise ValueError(\"n should be a positive integer.\")\n\n # def unit_tangent(self, t):\n # \"\"\"returns the unit tangent vector of the segment at t (centered at\n # the origin and expressed as a complex number). 
If the tangent\n # vector's magnitude is zero, this method will find the limit of\n # self.derivative(tau)/abs(self.derivative(tau)) as tau approaches t.\"\"\"\n # return bezier_unit_tangent(self, t)\n\n # def normal(self, t):\n # \"\"\"returns the (right hand rule) unit normal vector to self at t.\"\"\"\n # return -1j * self.unit_tangent(t)\n\n # def curvature(self, t):\n # \"\"\"returns the curvature of the segment at t.\"\"\"\n # return segment_curvature(self, t)\n\n # # def icurvature(self, kappa):\n # # \"\"\"returns a list of t-values such that 0 <= t<= 1 and\n # # seg.curvature(t) = kappa.\"\"\"\n # # z = self.poly()\n # # x, y = real(z), imag(z)\n # # dx, dy = x.deriv(), y.deriv()\n # # ddx, ddy = dx.deriv(), dy.deriv()\n # #\n # # p = kappa**2*(dx**2 + dy**2)**3 - (dx*ddy - ddx*dy)**2\n # # return polyroots01(p)\n\n # def reversed(self):\n # \"\"\"returns a copy of the CubicBezier object with its orientation\n # reversed.\"\"\"\n # new_cub = CubicBezier(self.end, self.control2, self.control1,\n # self.start)\n # if self._length_info['length']:\n # new_cub._length_info = self._length_info\n # new_cub._length_info['bpoints'] = (\n # self.end, self.control2, self.control1, self.start)\n # return new_cub\n\n # def intersect(self, other_seg, tol=1e-12):\n # \"\"\"Finds the intersections of two segments.\n # returns a list of tuples (t1, t2) such that\n # self.point(t1) == other_seg.point(t2).\n # Note: This will fail if the two segments coincide for more than a\n # finite collection of points.\"\"\"\n # if isinstance(other_seg, Line):\n # return bezier_by_line_intersections(self, other_seg)\n # elif (isinstance(other_seg, QuadraticBezier) or\n # isinstance(other_seg, CubicBezier)):\n # assert self != other_seg\n # longer_length = max(self.length(), other_seg.length())\n # return bezier_intersections(self, other_seg,\n # longer_length=longer_length,\n # tol=tol, tol_deC=tol)\n # elif isinstance(other_seg, Arc):\n # t2t1s = other_seg.intersect(self)\n # return [(t1, t2) for t2, t1 in t2t1s]\n # elif isinstance(other_seg, Path):\n # raise TypeError(\n # \"other_seg must be a path segment, not a Path object, use \"\n # \"Path.intersect().\")\n # else:\n # raise TypeError(\"other_seg must be a path segment.\")\n\n # def bbox(self):\n # \"\"\"returns the bounding box for the segment in the form\n # (xmin, xmax, ymin, ymax).\"\"\"\n # return bezier_bounding_box(self)\n\n # def split(self, t):\n # \"\"\"returns two segments, whose union is this segment and which join at\n # self.point(t).\"\"\"\n # bpoints1, bpoints2 = split_bezier(self.bpoints(), t)\n # return CubicBezier(*bpoints1), CubicBezier(*bpoints2)\n\n # def cropped(self, t0, t1):\n # \"\"\"returns a cropped copy of this segment which starts at\n # self.point(t0) and ends at self.point(t1).\"\"\"\n # return CubicBezier(*crop_bezier(self, t0, t1))\n\n # def radialrange(self, origin, return_all_global_extrema=False):\n # \"\"\"returns the tuples (d_min, t_min) and (d_max, t_max) which minimize\n # and maximize, respectively, the distance d = |self.point(t)-origin|.\"\"\"\n # return bezier_radialrange(self, origin,\n # return_all_global_extrema=return_all_global_extrema)\n\n # def rotated(self, degs, origin=None):\n # \"\"\"Returns a copy of self rotated by `degs` degrees (CCW) around the\n # point `origin` (a complex number). 
By default `origin` is either\n # `self.point(0.5)`, or in the case that self is an Arc object,\n # `origin` defaults to `self.center`.\"\"\"\n # return rotate(self, degs, origin=origin)\n\n # def translated(self, z0):\n # \"\"\"Returns a copy of self shifted by the complex quantity `z0` such\n # that self.translated(z0).point(t) = self.point(t) + z0 for any t.\"\"\"\n # return translate(self, z0)\n\n # def scaled(self, sx, sy=None, origin=0j):\n # \"\"\"Scale transform. See `scale` function for further explanation.\"\"\"\n # return scale(self, sx=sx, sy=sy, origin=origin)\n\n\nclass Path(MutableSequence):\n \"\"\"A Path is a sequence of path segments\"\"\"\n\n # Put it here, so there is a default if unpickled.\n _closed = False\n _start = None\n _end = None\n\n def __init__(self, *segments, **kw):\n self._segments = list(segments)\n self._length = None\n self._lengths = None\n if 'closed' in kw:\n self.closed = kw['closed'] # DEPRECATED\n if self._segments:\n self._start = self._segments[0].start\n self._end = self._segments[-1].end\n else:\n self._start = None\n self._end = None\n\n def __getitem__(self, index):\n return self._segments[index]\n\n def __setitem__(self, index, value):\n self._segments[index] = value\n self._length = None\n self._start = self._segments[0].start\n self._end = self._segments[-1].end\n\n def __delitem__(self, index):\n del self._segments[index]\n self._length = None\n self._start = self._segments[0].start\n self._end = self._segments[-1].end\n\n def __iter__(self):\n return self._segments.__iter__()\n\n def __contains__(self, x):\n return self._segments.__contains__(x)\n\n def insert(self, index, value):\n self._segments.insert(index, value)\n self._length = None\n self._start = self._segments[0].start\n self._end = self._segments[-1].end\n\n def reversed(self):\n \"\"\"returns a copy of the Path object with its orientation reversed.\"\"\"\n newpath = [seg.reversed() for seg in self]\n newpath.reverse()\n return Path(*newpath)\n\n def __len__(self):\n return len(self._segments)\n\n def __repr__(self):\n return \"Path({})\".format(\n \",\\n \".join(repr(x) for x in self._segments))\n\n def __eq__(self, other):\n if not isinstance(other, Path):\n return NotImplemented\n if len(self) != len(other):\n return False\n for s, o in zip(self._segments, other._segments):\n if not s == o:\n return False\n return True\n\n def __ne__(self, other):\n if not isinstance(other, Path):\n return NotImplemented\n return not self == other\n\n # def _calc_lengths(self, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):\n # if self._length is not None:\n # return\n\n # lengths = [each.length(error=error, min_depth=min_depth) for each in\n # self._segments]\n # self._length = sum(lengths)\n # self._lengths = [each/self._length for each in lengths]\n\n # def point(self, pos):\n\n # # Shortcuts\n # if pos == 0.0:\n # return self._segments[0].point(pos)\n # if pos == 1.0:\n # return self._segments[-1].point(pos)\n\n # self._calc_lengths()\n # # Find which segment the point we search for is located on:\n # segment_start = 0\n # for index, segment in enumerate(self._segments):\n # segment_end = segment_start + self._lengths[index]\n # if segment_end >= pos:\n # # This is the segment! 
How far in on the segment is the point?\n # segment_pos = (pos - segment_start)/(\n # segment_end - segment_start)\n # return segment.point(segment_pos)\n # segment_start = segment_end\n\n # def length(self, T0=0, T1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):\n # self._calc_lengths(error=error, min_depth=min_depth)\n # if T0 == 0 and T1 == 1:\n # return self._length\n # else:\n # if len(self) == 1:\n # return self[0].length(t0=T0, t1=T1)\n # idx0, t0 = self.T2t(T0)\n # idx1, t1 = self.T2t(T1)\n # if idx0 == idx1:\n # return self[idx0].length(t0=t0, t1=t1)\n # return (self[idx0].length(t0=t0) +\n # sum(self[idx].length() for idx in range(idx0 + 1, idx1)) +\n # self[idx1].length(t1=t1))\n\n # def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,\n # error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):\n # \"\"\"Returns a float, t, such that self.length(0, t) is approximately s.\n # See the inv_arclength() docstring for more details.\"\"\"\n # return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,\n # min_depth=min_depth)\n\n # def iscontinuous(self):\n # \"\"\"Checks if a path is continuous with respect to its\n # parameterization.\"\"\"\n # return all(self[i].end == self[i+1].start for i in range(len(self) - 1))\n\n # def continuous_subpaths(self):\n # \"\"\"Breaks self into its continuous components, returning a list of\n # continuous subpaths.\n # I.e.\n # (all(subpath.iscontinuous() for subpath in self.continuous_subpaths())\n # and self == concatpaths(self.continuous_subpaths()))\n # )\n # \"\"\"\n # subpaths = []\n # subpath_start = 0\n # for i in range(len(self) - 1):\n # if self[i].end != self[(i+1) % len(self)].start:\n # subpaths.append(Path(*self[subpath_start: i+1]))\n # subpath_start = i+1\n # subpaths.append(Path(*self[subpath_start: len(self)]))\n # return subpaths\n\n # def isclosed(self):\n # \"\"\"This function determines if a connected path is closed.\"\"\"\n # assert len(self) != 0\n # assert self.iscontinuous()\n # return self.start == self.end\n\n # def isclosedac(self):\n # assert len(self) != 0\n # return self.start == self.end\n\n # def _is_closable(self):\n # end = self[-1].end\n # for segment in self:\n # if segment.start == end:\n # return True\n # return False\n\n # @property\n # def closed(self, warning_on=CLOSED_WARNING_ON):\n # \"\"\"The closed attribute is deprecated, please use the isclosed()\n # method instead. See _closed_warning for more information.\"\"\"\n # mes = (\"This attribute is deprecated, consider using isclosed() \"\n # \"method instead.\\n\\nThis attribute is kept for compatibility \"\n # \"with scripts created using svg.path (v2.0). 
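The point(pos) loop shown above walks the normalized per-segment length fractions until the running total covers pos. A self-contained sketch of that lookup with plain floats (names and data are illustrative):

def locate(pos, fractions):
    # fractions: per-segment shares of total length, summing to 1.
    start = 0.0
    for idx, frac in enumerate(fractions):
        end = start + frac
        if end >= pos:
            return idx, (pos - start) / (end - start)
        start = end
    return len(fractions) - 1, 1.0  # pos == 1 despite rounding error

idx, t = locate(0.6, [0.2, 0.3, 0.5])  # three segments: 20%, 30%, 50%
assert idx == 2 and abs(t - 0.2) < 1e-9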
You can prevent \"\n # \"this warning in the future by setting \"\n # \"CLOSED_WARNING_ON=False.\")\n # if warning_on:\n # warn(mes)\n # return self._closed and self._is_closable()\n\n # @closed.setter\n # def closed(self, value):\n # value = bool(value)\n # if value and not self._is_closable():\n # raise ValueError(\"End does not coincide with a segment start.\")\n # self._closed = value\n\n # @property\n # def start(self):\n # if not self._start:\n # self._start = self._segments[0].start\n # return self._start\n\n # @start.setter\n # def start(self, pt):\n # self._start = pt\n # self._segments[0].start = pt\n\n # @property\n # def end(self):\n # if not self._end:\n # self._end = self._segments[-1].end\n # return self._end\n\n # @end.setter\n # def end(self, pt):\n # self._end = pt\n # self._segments[-1].end = pt\n\n def d(self, useSandT=False, use_closed_attrib=False):\n \"\"\"Returns a path d-string for the path object.\n For an explanation of useSandT and use_closed_attrib, see the\n compatibility notes in the README.\"\"\"\n\n if use_closed_attrib:\n self_closed = self.closed(warning_on=False)\n if self_closed:\n segments = self[:-1]\n else:\n segments = self[:]\n else:\n self_closed = False\n segments = self[:]\n\n current_pos = None\n parts = []\n previous_segment = None\n end = self[-1].end\n\n for segment in segments:\n seg_start = segment.start\n # If the start of this segment does not coincide with the end of\n # the last segment or if this segment is actually the close point\n # of a closed path, then we should start a new subpath here.\n if current_pos != seg_start or \\\n (self_closed and seg_start == end and use_closed_attrib):\n parts.append('M {},{}'.format(seg_start.real, seg_start.imag))\n\n if isinstance(segment, Line):\n args = segment.end.real, segment.end.imag\n parts.append('L {},{}'.format(*args))\n elif isinstance(segment, CubicBezier):\n if useSandT and segment.is_smooth_from(previous_segment,\n warning_on=False):\n args = (segment.control2.real, segment.control2.imag,\n segment.end.real, segment.end.imag)\n parts.append('S {},{} {},{}'.format(*args))\n else:\n args = (segment.control1.real, segment.control1.imag,\n segment.control2.real, segment.control2.imag,\n segment.end.real, segment.end.imag)\n parts.append('C {},{} {},{} {},{}'.format(*args))\n # elif isinstance(segment, QuadraticBezier):\n # if useSandT and segment.is_smooth_from(previous_segment,\n # warning_on=False):\n # args = segment.end.real, segment.end.imag\n # parts.append('T {},{}'.format(*args))\n # else:\n # args = (segment.control.real, segment.control.imag,\n # segment.end.real, segment.end.imag)\n # parts.append('Q {},{} {},{}'.format(*args))\n\n # elif isinstance(segment, Arc):\n # args = (segment.radius.real, segment.radius.imag,\n # segment.rotation,int(segment.large_arc),\n # int(segment.sweep),segment.end.real, segment.end.imag)\n # parts.append('A {},{} {} {:d},{:d} {},{}'.format(*args))\n current_pos = segment.end\n previous_segment = segment\n\n if self_closed:\n parts.append('Z')\n\n return ' '.join(parts)\n\n # def joins_smoothly_with(self, previous, wrt_parameterization=False):\n # \"\"\"Checks if this Path object joins smoothly with previous\n # path/segment. By default, this only checks that this Path starts\n # moving (at t=0) in the same direction (and from the same positive) as\n # previous stopped moving (at t=1). 
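The d() serializer above formats the real and imaginary parts of complex endpoints into SVG path commands. A tiny standalone rehearsal of the same formatting idiom, with no Path objects involved:

start, c1, c2, end = 0j, 1j, 1 + 1j, 1 + 0j
parts = ['M {},{}'.format(start.real, start.imag)]
parts.append('C {},{} {},{} {},{}'.format(
    c1.real, c1.imag, c2.real, c2.imag, end.real, end.imag))
print(' '.join(parts))  # M 0.0,0.0 C 0.0,1.0 1.0,1.0 1.0,0.0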
To check if the tangent magnitudes\n # also match, set wrt_parameterization=True.\"\"\"\n # if wrt_parameterization:\n # return self[0].start == previous.end and self.derivative(\n # 0) == previous.derivative(1)\n # else:\n # return self[0].start == previous.end and self.unit_tangent(\n # 0) == previous.unit_tangent(1)\n\n # def T2t(self, T):\n # \"\"\"returns the segment index, `seg_idx`, and segment parameter, `t`,\n # corresponding to the path parameter `T`. In other words, this is the\n # inverse of the `Path.t2T()` method.\"\"\"\n # if T == 1:\n # return len(self)-1, 1\n # if T == 0:\n # return 0, 0\n # self._calc_lengths()\n # # Find which segment self.point(T) falls on:\n # T0 = 0 # the T-value the current segment starts on\n # for seg_idx, seg_length in enumerate(self._lengths):\n # T1 = T0 + seg_length # the T-value the current segment ends on\n # if T1 >= T:\n # # This is the segment!\n # t = (T - T0)/seg_length\n # return seg_idx, t\n # T0 = T1\n\n # assert 0 <= T <= 1\n # raise BugException\n\n # def t2T(self, seg, t):\n # \"\"\"returns the path parameter T which corresponds to the segment\n # parameter t. In other words, for any Path object, path, and any\n # segment in path, seg, T(t) = path.t2T(seg, t) is the unique\n # reparameterization such that path.point(T(t)) == seg.point(t) for all\n # 0 <= t <= 1.\n # Input Note: seg can be a segment in the Path object or its\n # corresponding index.\"\"\"\n # self._calc_lengths()\n # # Accept an index or a segment for seg\n # if isinstance(seg, int):\n # seg_idx = seg\n # else:\n # try:\n # seg_idx = self.index(seg)\n # except ValueError:\n # assert is_path_segment(seg) or isinstance(seg, int)\n # raise\n\n # segment_start = sum(self._lengths[:seg_idx])\n # segment_end = segment_start + self._lengths[seg_idx]\n # T = (segment_end - segment_start)*t + segment_start\n # return T\n\n # def derivative(self, T, n=1):\n # \"\"\"returns the tangent vector of the Path at T (centered at the origin\n # and expressed as a complex number).\n # Note: Bezier curves can have points where their derivative vanishes.\n # If you are interested in the tangent direction, use unit_tangent()\n # method instead.\"\"\"\n # seg_idx, t = self.T2t(T)\n # seg = self._segments[seg_idx]\n # return seg.derivative(t, n=n)/seg.length()**n\n\n # def unit_tangent(self, T):\n # \"\"\"returns the unit tangent vector of the Path at T (centered at the\n # origin and expressed as a complex number). 
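t2T above is the inverse of T2t: it rescales a segment-local parameter into the global one using the same cumulative length fractions. A self-contained round-trip check of that relationship (the function names mirror the originals, but the fractions are made up):

def t2T(seg_idx, t, fractions):
    # Global T = length fraction before the segment, plus t's share of it.
    start = sum(fractions[:seg_idx])
    return start + fractions[seg_idx] * t

def T2t(T, fractions):
    start = 0.0
    for idx, frac in enumerate(fractions):
        if start + frac >= T:
            return idx, (T - start) / frac
        start += frac
    return len(fractions) - 1, 1.0

fracs = [0.25, 0.25, 0.5]
assert T2t(t2T(2, 0.5, fracs), fracs) == (2, 0.5)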
If the tangent vector's\n # magnitude is zero, this method will find the limit of\n # self.derivative(tau)/abs(self.derivative(tau)) as tau approaches T.\"\"\"\n # seg_idx, t = self.T2t(T)\n # return self._segments[seg_idx].unit_tangent(t)\n\n # def normal(self, t):\n # \"\"\"returns the (right hand rule) unit normal vector to self at t.\"\"\"\n # return -1j*self.unit_tangent(t)\n\n # def curvature(self, T):\n # \"\"\"returns the curvature of this Path object at T and outputs\n # float('inf') if not differentiable at T.\"\"\"\n # seg_idx, t = self.T2t(T)\n # seg = self[seg_idx]\n # if np.isclose(t, 0) and (seg_idx != 0 or self.end==self.start):\n # previous_seg_in_path = self._segments[\n # (seg_idx - 1) % len(self._segments)]\n # if not seg.joins_smoothly_with(previous_seg_in_path):\n # return float('inf')\n # elif np.isclose(t, 1) and (seg_idx != len(self) - 1 or self.end==self.start):\n # next_seg_in_path = self._segments[\n # (seg_idx + 1) % len(self._segments)]\n # if not next_seg_in_path.joins_smoothly_with(seg):\n # return float('inf')\n # dz = self.derivative(T)\n # ddz = self.derivative(T, n=2)\n # dx, dy = dz.real, dz.imag\n # ddx, ddy = ddz.real, ddz.imag\n # return abs(dx*ddy - dy*ddx)/(dx*dx + dy*dy)**1.5\n\n # # def icurvature(self, kappa):\n # # \"\"\"returns a list of T-values such that 0 <= T <= 1 and\n # # seg.curvature(t) = kappa.\n # # Note: not implemented for paths containing Arc segments.\"\"\"\n # # assert is_bezier_path(self)\n # # Ts = []\n # # for i, seg in enumerate(self):\n # # Ts += [self.t2T(i, t) for t in seg.icurvature(kappa)]\n # # return Ts\n\n # def area(self):\n # \"\"\"returns the area enclosed by this Path object.\n # Note: negative area results from CW (as opposed to CCW)\n # parameterization of the Path object.\"\"\"\n # assert self.isclosed()\n # area_enclosed = 0\n # for seg in self:\n # x = real(seg.poly())\n # dy = imag(seg.poly()).deriv()\n # integrand = x*dy\n # integral = integrand.integ()\n # area_enclosed += integral(1) - integral(0)\n # return area_enclosed\n\n # def intersect(self, other_curve, justonemode=False, tol=1e-12):\n # \"\"\"returns list of pairs of pairs ((T1, seg1, t1), (T2, seg2, t2))\n # giving the intersection points.\n # If justonemode==True, then returns just the first\n # intersection found.\n # tol is used to check for redundant intersections (see comment above\n # the code block where tol is used).\n # Note: If the two path objects coincide for more than a finite set of\n # points, this code will fail.\"\"\"\n # path1 = self\n # if isinstance(other_curve, Path):\n # path2 = other_curve\n # else:\n # path2 = Path(other_curve)\n # assert path1 != path2\n # intersection_list = []\n # for seg1 in path1:\n # for seg2 in path2:\n # if justonemode and intersection_list:\n # return intersection_list[0]\n # for t1, t2 in seg1.intersect(seg2, tol=tol):\n # T1 = path1.t2T(seg1, t1)\n # T2 = path2.t2T(seg2, t2)\n # intersection_list.append(((T1, seg1, t1), (T2, seg2, t2)))\n # if justonemode and intersection_list:\n # return intersection_list[0]\n\n # # Note: If the intersection takes place at a joint (point one seg ends\n # # and next begins in path) then intersection_list may contain a\n # # redundant intersection. 
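The curvature method above evaluates kappa = |x'y'' - y'x''| / (x'^2 + y'^2)^(3/2). A quick numeric sanity check: the parabola y = x**2 has curvature 2 at the origin, where its osculating circle has radius 1/2:

def parabola_curvature(t):
    # (x, y) = (t, t**2): x' = 1, y' = 2t, x'' = 0, y'' = 2.
    dx, dy, ddx, ddy = 1.0, 2.0 * t, 0.0, 2.0
    return abs(dx * ddy - dy * ddx) / (dx * dx + dy * dy) ** 1.5

assert parabola_curvature(0.0) == 2.0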
This code block checks for and removes said\n # # redundancies.\n # if intersection_list:\n # pts = [seg1.point(_t1) for _T1, _seg1, _t1 in list(zip(*intersection_list))[0]]\n # indices2remove = []\n # for ind1 in range(len(pts)):\n # for ind2 in range(ind1 + 1, len(pts)):\n # if abs(pts[ind1] - pts[ind2]) < tol:\n # # then there's a redundancy. Remove it.\n # indices2remove.append(ind2)\n # intersection_list = [inter for ind, inter in\n # enumerate(intersection_list) if\n # ind not in indices2remove]\n # return intersection_list\n\n # def bbox(self):\n # \"\"\"returns a bounding box for the input Path object in the form\n # (xmin, xmax, ymin, ymax).\"\"\"\n # bbs = [seg.bbox() for seg in self._segments]\n # xmins, xmaxs, ymins, ymaxs = list(zip(*bbs))\n # xmin = min(xmins)\n # xmax = max(xmaxs)\n # ymin = min(ymins)\n # ymax = max(ymaxs)\n # return xmin, xmax, ymin, ymax\n\n # def cropped(self, T0, T1):\n # \"\"\"returns a cropped copy of the path.\"\"\"\n # assert 0 <= T0 <= 1 and 0 <= T1<= 1\n # assert T0 != T1\n # assert not (T0 == 1 and T1 == 0)\n\n # if T0 == 1 and 0 < T1 < 1 and self.isclosed():\n # return self.cropped(0, T1)\n\n # if T1 == 1:\n # seg1 = self[-1]\n # t_seg1 = 1\n # i1 = len(self) - 1\n # else:\n # seg1_idx, t_seg1 = self.T2t(T1)\n # seg1 = self[seg1_idx]\n # if np.isclose(t_seg1, 0):\n # i1 = (self.index(seg1) - 1) % len(self)\n # seg1 = self[i1]\n # t_seg1 = 1\n # else:\n # i1 = self.index(seg1)\n # if T0 == 0:\n # seg0 = self[0]\n # t_seg0 = 0\n # i0 = 0\n # else:\n # seg0_idx, t_seg0 = self.T2t(T0)\n # seg0 = self[seg0_idx]\n # if np.isclose(t_seg0, 1):\n # i0 = (self.index(seg0) + 1) % len(self)\n # seg0 = self[i0]\n # t_seg0 = 0\n # else:\n # i0 = self.index(seg0)\n\n # if T0 < T1 and i0 == i1:\n # new_path = Path(seg0.cropped(t_seg0, t_seg1))\n # else:\n # new_path = Path(seg0.cropped(t_seg0, 1))\n\n # # T1 global_max[0]:\n # global_max = seg_global_max + (seg_idx,)\n # return global_min, global_max\n\n # def rotated(self, degs, origin=None):\n # \"\"\"Returns a copy of self rotated by `degs` degrees (CCW) around the\n # point `origin` (a complex number). By default `origin` is either\n # `self.point(0.5)`, or in the case that self is an Arc object,\n # `origin` defaults to `self.center`.\"\"\"\n # return rotate(self, degs, origin=origin)\n\n # def translated(self, z0):\n # \"\"\"Returns a copy of self shifted by the complex quantity `z0` such\n # that self.translated(z0).point(t) = self.point(t) + z0 for any t.\"\"\"\n # return translate(self, z0)\n\n # def scaled(self, sx, sy=None, origin=0j):\n # \"\"\"Scale transform. 
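The intersection clean-up above discards near-duplicate intersection points whose pairwise distance falls below tol. The same O(n^2) dedup, sketched standalone on complex points (the data is illustrative):

def dedup(points, tol=1e-12):
    keep = []
    for p in points:
        # Keep a point only if it is at least tol away from all kept ones.
        if all(abs(p - q) >= tol for q in keep):
            keep.append(p)
    return keep

assert dedup([0j, 1e-15 + 0j, 1 + 1j]) == [0j, 1 + 1j]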
See `scale` function for further explanation.\"\"\"\n # return scale(self, sx=sx, sy=sy, origin=origin)\n","repo_name":"CWSchulze/life_line_chart","sub_path":"life_line_chart/SimpleSVGItems.py","file_name":"SimpleSVGItems.py","file_ext":"py","file_size_in_byte":41320,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"89"} +{"seq_id":"29347630730","text":"import numpy as np\n\nimport pandas as pd\nimport xgboost as xgb\n#from xgboost import XGBClassifier\n#from xgboost import XGBRegressor\nfrom select_para import *\nimport warnings\nimport gc\nwarnings.filterwarnings(\"ignore\")\nclass Model_class(object):\n\n def __init__(self,X_train=0, X_test=0, y_train=0, y_test=0,model=\"xgb\"):\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n\n\n def train(self,train_data,label):\n label = label.ravel()\n #best_para = select_parameter(train_data, label)\n\n #self.model = XGBClassifier(**best_para)\n #self.model = XGBClassifier()\n #self.model.fit(train_data, label)\n params = {'objective': 'binary:logistic','max_depth': 10,'eval_metric': 'auc'}\n\n xgbtrain = xgb.DMatrix(train_data, label)\n\n self.model = xgb.train(params=params,dtrain=xgbtrain,verbose_eval=False)\n\n\n def predict(self,test_set):\n xgbtest = xgb.DMatrix(test_set)\n y_pred = self.model.predict(xgbtest)\n\n return y_pred\n\nclass Model_regre(object):\n\n def __init__(self, X_train=0, X_test=0, y_train=0, y_test=0, model=\"xgb\"):\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n\n # if model == \"xgb\":\n # self.reg = XGBRegressor()\n\n def train(self, X_train, y_train):\n params = {'objective': 'reg:linear','eval_metric': 'rmse', 'verbose_eval': False}\n xgbtrain = xgb.DMatrix(X_train, y_train)\n\n self.model = xgb.train(params=params, dtrain=xgbtrain, verbose_eval=False)\n\n\n\n def predict(self,test_set):\n xgbtest = xgb.DMatrix(test_set)\n y_pred = self.model.predict(xgbtest)\n return y_pred\n\n","repo_name":"moonlight1776/competitions","sub_path":"驾驶行为预测驾驶风险/team1_xgb&lgb/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"14808893659","text":"import tweepy, os, json, csv\n\nclass CryptoStream(tweepy.StreamListener):\n\n def __init__(self,symbol):\n super(CryptoStream, self).__init__()\n self.symbol = symbol\n \n # Actions performed when a tweet enters the stream\n def on_status(self, status):\n \n time = status.created_at\n tweetid = status.id_str\n text = status.text\n userid = status.user.id_str\n username = status.user.name\n followers = status.user.followers_count\n retweets = status.retweet_count\n favorites = status.favorite_count\n \n if hasattr(status,'extended_tweet'):\n text = status.extended_tweet['full_text']\n \n data = [time,tweetid,text,userid,username,followers,retweets,favorites]\n \n print(status.text)\n \n here = os.path.dirname(os.path.realpath('twitter.py'))\n \n filepath = os.path.join(here,'{}-tweets.csv'.format(self.symbol))\n \n if('bitcoin') in text:\n with open(filepath,'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(data)\n \n def on_error(self, status_code):\n if status_code == 420:\n #returning False in on_error disconnects the stream\n return False\n 
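The model.py record above drives xgboost through the low-level xgb.DMatrix / xgb.train / Booster.predict API rather than the sklearn-style wrappers it has commented out. A minimal end-to-end run of that API on synthetic data (shapes, labels, and parameters are illustrative):

import numpy as np
import xgboost as xgb

X = np.random.rand(100, 5)
y = (X[:, 0] > 0.5).astype(int)       # synthetic binary labels

dtrain = xgb.DMatrix(X, label=y)
params = {'objective': 'binary:logistic', 'eval_metric': 'auc'}
booster = xgb.train(params=params, dtrain=dtrain, num_boost_round=10)

preds = booster.predict(xgb.DMatrix(X))  # probabilities in [0, 1]
print(preds[:5])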
","repo_name":"ethankershner/crypto-price-prediction","sub_path":"data-gatherer/twitter.py","file_name":"twitter.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"3911146831","text":"import logging\nlogger = logging.getLogger(__name__)\n\nfrom odoo import models, fields, api, _\nfrom odoo import tools\nimport datetime as dt\nfrom datetime import datetime\nfrom dateutil import relativedelta\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DF\nimport calendar\nfrom odoo.modules.module import get_module_resource\nfrom odoo.exceptions import ValidationError\nimport base64\n\nclass AssurancePlan(models.Model):\n _name = \"assurance.plan\"\n \n name = fields.Char(string='Plan')\n code = fields.Char(string='Plan Code')\n assurance_partner_id = fields.Many2one('res.partner',string='Assurance Company')\n \nclass DoctorPatientOccupation(models.Model):\n _name = \"doctor.patient.occupation\"\n \n code = fields.Char(string='Code', copy=False)\n name = fields.Char(string='Description')\n\nclass DoctorAdministrativeData(models.Model):\n _name = \"doctor.administrative.data\"\n \n @api.model\n def _default_image(self):\n image_path = get_module_resource('clinica_doctor_data', 'static/src/img', 'default_image.png')\n return tools.image_resize_image_big(base64.b64encode(open(image_path, 'rb').read()))\n \n name = fields.Char(string='Number ID')\n numberid_integer = fields.Integer(string='Number ID for TI or CC Documents')\n document_type = fields.Selection([('cc','CC - ID Document'),('ce','CE - Aliens Certificate'),\n ('pa','PA - Passport'),('rc','RC - Civil Registry'),('ti','TI - Identity Card'),\n ('as','AS - Unidentified Adult'),('ms','MS - Unidentified Minor')], string='Type of Document')\n image = fields.Binary(\"Image\", attachment=True, default=_default_image,\n help=\"This field holds the image used as avatar for this contact, limited to 1024x1024px\", copy=False)\n image_medium = fields.Binary(\"Medium-sized image\", attachment=True, \n help=\"Medium-sized image of this contact. It is automatically \"\\\n \"resized as a 128x128px image, with aspect ratio preserved. \"\\\n \"Use this field in form views or some kanban views.\", copy=False)\n image_small = fields.Binary(\"Small-sized image\", attachment=True, \n help=\"Small-sized image of this contact. It is automatically \"\\\n \"resized as a 64x64px image, with aspect ratio preserved. 
\"\\\n \"Use this field anywhere a small image is required.\", copy=False)\n first_name = fields.Char(string='First Name')\n first_last_name = fields.Char(string='First Last Name')\n second_name = fields.Char(string='Second Name')\n second_last_name = fields.Char(string='Second Last Name')\n gender = fields.Selection([('male','Male'), ('female','Female')], string='Gender')\n birth_date = fields.Date(string='Birth Date')\n blood_type = fields.Selection([('a_positive','A+'),('a_negative','A-'),\n ('b_positive','B+'),('b_negative','B-'),\n ('ab_positive','AB+'),('ab_negative','AB-'),\n ('o_positive','O+'),('o_negative','O-')], string='Blood Type')\n age = fields.Integer(string='Age', compute='_compute_age_meassure_unit')\n age_meassure_unit = fields.Selection([('1','Years'),('2','Months'),('3','Days')], string='Unit of Measure of Age',\n compute='_compute_age_meassure_unit')\n birth_country_id = fields.Many2one('res.country', string='Country of Birth')\n birth_department_id = fields.Many2one('res.country.state', string='Department of Birth Place')\n birth_city_id = fields.Many2one('res.country.state.city', string='Location/City/Town of Birth')\n birth_district = fields.Char(string='Districts/localties/areas of Birth Place')\n birth_neighborhood = fields.Char(string='Neighborhood of Birth Place')\n birth_address = fields.Text(string=\"Address of Birth Place\")\n residence_country_id = fields.Many2one('res.country', string='Residence Country')\n residence_department_id = fields.Many2one('res.country.state', string='Residence Department')\n residence_city_id = fields.Many2one('res.country.state.city', string='Residence Location/City/Town')\n residence_district = fields.Char(string='Residence Districts/localties/areas')\n residence_neighborhood = fields.Char(string='Residence Neighborhood')\n residence_address = fields.Text(string=\"Residence Address\")\n civil_state = fields.Selection([('separated','Separada/o'),('single','Soltera/o'),('married','Casada/o'),\n ('free_union','Unión libre'),('widow','Viuda/o')], string='Civil Status')\n beliefs = fields.Text(string=\"Beliefs\")\n occupation = fields.Char(string='Occupation')\n profession_id = fields.Many2one('doctor.patient.occupation', string='Profession')\n email = fields.Char(string='Email')\n phone = fields.Char(string='Phone Number')\n mobile = fields.Char('Mobile Number')\n accompany_name = fields.Char(\"Name of the companion\")\n accompany_relationship = fields.Selection([('mother','Mother'),('father','Father'),('grand_father','Grand Father'),\n ('grand_mother','Grand Mother'),('uncle','Uncle'),('aunt','Aunt'),\n ('friend','Friend'),('other','Other')], string=\"Accompany Person's Relationship\")\n other_accompany_relationship = fields.Char(string=\"Other Accompany Person's Relationship\")\n accompany_phone = fields.Char(\"Accompany Person's Phone Number\")\n responsible_name = fields.Char(\"Responsible Person's Name\")\n responsible_relationship = fields.Selection([('mother','Mother'),('father','Father'),('grand_father','Grand Father'),\n ('grand_mother','Grand Mother'),('uncle','Uncle'),('aunt','Aunt'),\n ('friend','Friend'),('other','Other')], string=\"Responsible Person's Relationship\")\n other_responsible_relationship = fields.Char(string=\"Other Responsible Person's Relationship\")\n responsible_phone = fields.Char(\"Responsible Person's Phone Number\")\n father_name = fields.Char(string=\"Father's Name\")\n father_occupation = fields.Char(string=\"Father's Occupation\")\n father_address = fields.Text(string=\"Father's Address\")\n 
father_phone = fields.Char(string=\"Father's Phone Number\")\n mother_name = fields.Char(string=\"Mother's Name\")\n mother_occupation = fields.Char(string=\"Mother's Occupation\")\n mother_address = fields.Text(string=\"Mother's Address\")\n mother_phone = fields.Char(string=\"Mother's Phone Number\")\n user_type = fields.Selection([('contributory','Contributivo'),('subsidized','Subsidiado'),('linked','Vinculado')], string=\"User Type\")\n primary_payer = fields.Selection([('private_user','Usuario Particular'),('eps','EPS'),\n ('another_insurer','Otra Aseguradora'),('mixed','Pago Mixto')], string=\"Primary Payer\")\n assurance_partner_id = fields.Many2one('res.partner',string='Assurance Company')\n assurance_plan_id = fields.Many2one('assurance.plan', string='Assurer Plans')\n other_assurance_partner_id = fields.Many2one('res.partner',string='Other Assurance Company')\n other_assurance_plan_id = fields.Many2one('assurance.plan', string='Other Assurer Plans')\n partner_id = fields.Many2one('res.partner', copy=False, ondelete='restrict', string='Related Partner', \n help='Partner-related data of administrative data ')\n \n \n @api.multi\n @api.depends('birth_date')\n def _compute_age_meassure_unit(self):\n for data in self:\n if data.birth_date:\n today_datetime = datetime.today()\n today_date = today_datetime.date()\n birth_date_format = datetime.strptime(data.birth_date, DF).date()\n date_difference = today_date - birth_date_format\n difference = int(date_difference.days)\n month_days = calendar.monthrange(today_date.year, today_date.month)[1]\n date_diff = relativedelta.relativedelta(today_date, birth_date_format)\n if difference < 30:\n data.age_meassure_unit = '3'\n data.age = int(date_diff.days)\n elif difference < 365:\n data.age_meassure_unit = '2'\n data.age = int(date_diff.months)\n else:\n data.age_meassure_unit = '1'\n data.age = int(date_diff.years)\n \n def _check_birth_date(self, birth_date):\n warn_msg = '' \n today_datetime = datetime.today()\n today_date = today_datetime.date()\n birth_date_format = datetime.strptime(birth_date, DF).date()\n date_difference = today_date - birth_date_format\n difference = int(date_difference.days) \n if difference < 0: \n warn_msg = _('Invalid birth date!')\n return warn_msg\n \n @api.onchange('birth_date','age_meassure_unit')\n def onchange_birth_date(self):\n if self.age_meassure_unit == '3':\n self.document_type = 'rc'\n if self.birth_date:\n warn_msg = self._check_birth_date(self.birth_date)\n if warn_msg:\n warning = {\n 'title': _('Warning!'),\n 'message': warn_msg,\n }\n return {'warning': warning}\n \n \n @api.onchange('numberid_integer', 'document_type')\n def onchange_numberid_integer(self):\n if self.numberid_integer:\n self.name = str(self.numberid_integer) \n if self.document_type and self.document_type in ['cc','ti'] and self.numberid_integer == 0:\n self.name = str(0)\n \n def _check_email(self, email):\n if not tools.single_email_re.match(email):\n raise ValidationError(_('Invalid Email ! 
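_compute_age_meassure_unit above buckets an age into days, months, or years with dateutil's relativedelta (note that it also computes calendar.monthrange but never uses the result). A standalone sketch of the same bucketing logic, with illustrative dates:

from datetime import date
from dateutil import relativedelta

def age_parts(birth, today):
    diff_days = (today - birth).days
    delta = relativedelta.relativedelta(today, birth)
    if diff_days < 30:
        return int(delta.days), 'days'
    if diff_days < 365:
        return int(delta.months), 'months'
    return int(delta.years), 'years'

assert age_parts(date(2000, 1, 15), date(2024, 1, 15)) == (24, 'years')
assert age_parts(date(2024, 1, 1), date(2024, 1, 10)) == (9, 'days')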
Please enter a valid email address.'))\n else:\n return True\n \n def _check_assign_numberid(self, numberid_integer):\n if numberid_integer == 0:\n raise ValidationError(_('Please enter non zero value for Number ID'))\n else:\n numberid = str(numberid_integer)\n return numberid\n \n @api.multi\n def _check_document_types(self):\n for data in self:\n if data.age_meassure_unit == '3' and data.document_type not in ['rc','ms']:\n raise ValidationError(_(\"You can only choose 'RC' or 'MS' documents, for age less than 1 month.\"))\n if data.age > 17 and data.age_meassure_unit == '1' and data.document_type in ['rc','ms']:\n raise ValidationError(_(\"You cannot choose 'RC' or 'MS' document types for age greater than 17 years.\"))\n if data.age_meassure_unit in ['2','3'] and data.document_type in ['cc','as','ti']:\n raise ValidationError(_(\"You cannot choose 'CC', 'TI' or 'AS' document types for age less than 1 year.\"))\n if data.document_type == 'ms' and data.age_meassure_unit != '3':\n raise ValidationError(_(\"You can only choose 'MS' document for age between 1 to 30 days.\"))\n if data.document_type == 'as' and data.age_meassure_unit == '1' and data.age <= 17:\n raise ValidationError(_(\"You can choose 'AS' document only if the age is greater than 17 years.\"))\n \n @api.multi\n def _get_related_partner_vals(self, vals):\n ## code for updating partner with change in administrative data\n ## administrative data will not get updated with partner changes\n for data in self:\n partner_vals = {}\n if 'first_name' in vals or 'first_last_name' in vals or 'second_name' in vals or 'second_last_name' in vals:\n first_name = data.first_name or ''\n first_last_name = data.first_last_name or ''\n second_name = data.second_name or ''\n second_last_name = data.second_last_name or ''\n if 'first_name' in vals:\n first_name = vals.get('first_name', False) or ''\n partner_vals.update({'x_name1': vals.get('first_name', False)})\n if 'first_last_name' in vals:\n first_last_name = vals.get('first_last_name', False) or ''\n partner_vals.update({'x_name2': vals.get('first_last_name', False)})\n if 'second_name' in vals:\n second_name = vals.get('second_name', False) or ''\n partner_vals.update({'x_lastname1': vals.get('second_name', False)})\n if 'second_last_name' in vals:\n second_last_name = vals.get('second_last_name', False) or ''\n partner_vals.update({'x_lastname2': vals.get('second_last_name', False)})\n nameList = [\n first_name.strip(),\n first_last_name.strip(),\n second_name.strip(),\n second_last_name.strip()\n ]\n formatedList = []\n name = ''\n for item in nameList:\n if item is not '':\n formatedList.append(item)\n name = ' ' .join(formatedList).title()\n partner_vals.update({'name': name})\n if 'birth_date' in vals:\n partner_vals.update({'xbirthday': vals.get('birth_date', False)})\n if 'email' in vals:\n partner_vals.update({'email': vals.get('email', False)})\n if 'phone' in vals:\n partner_vals.update({'phone': vals.get('phone', False)})\n if 'mobile' in vals:\n partner_vals.update({'mobile': vals.get('mobile', False)})\n if 'image' in vals:\n partner_vals.update({'image': vals.get('image', False)})\n if 'residence_district' in vals:\n partner_vals.update({'street2': vals.get('residence_district', False)})\n if 'residence_department_id' in vals:\n partner_vals.update({'state_id': vals.get('residence_department_id', False)})\n if 'residence_country_id' in vals:\n partner_vals.update({'country_id': vals.get('residence_country_id', False)})\n if 'residence_address' in vals:\n 
partner_vals.update({'street': vals.get('residence_address', False)})\n return partner_vals\n \n @api.model\n def create(self, vals):\n if vals.get('email', False):\n self._check_email(vals.get('email'))\n if vals.get('document_type', False) and vals['document_type'] in ['cc','ti']:\n numberid_integer = 0\n if vals.get('numberid_integer', False):\n numberid_integer = vals['numberid_integer']\n numberid = self._check_assign_numberid(numberid_integer)\n vals.update({'name': numberid})\n if vals.get('birth_date', False):\n warn_msg = self._check_birth_date(vals['birth_date'])\n if warn_msg:\n raise ValidationError(warn_msg)\n tools.image_resize_images(vals)\n res = super(DoctorAdministrativeData, self).create(vals)\n res._check_document_types()\n partner_vals = res._get_related_partner_vals(vals)\n partner_vals.update({'doctype': 1})\n partner = self.env['res.partner'].create(partner_vals)\n res.partner_id = partner.id \n return res\n \n @api.multi\n def write(self, vals):\n if vals.get('email', False):\n self._check_email(vals.get('email'))\n tools.image_resize_images(vals)\n if vals.get('document_type', False) or vals.get('numberid_integer', False):\n if vals.get('document_type', False):\n document_type = vals['document_type']\n else:\n document_type = self.document_type\n if document_type in ['cc','ti']:\n if vals.get('numberid_integer', False):\n numberid_integer = vals['numberid_integer']\n else:\n numberid_integer = self.numberid_integer\n numberid = self._check_assign_numberid(numberid_integer)\n if vals.get('birth_date', False):\n warn_msg = self._check_birth_date(vals['birth_date'])\n if warn_msg:\n raise ValidationError(warn_msg)\n tools.image_resize_images(vals)\n res = super(DoctorAdministrativeData, self).write(vals)\n self._check_document_types()\n if 'first_name' in vals or 'first_last_name' in vals or 'second_name' in vals or 'second_last_name' in vals\\\n or 'birth_date' in vals or 'email' in vals or 'phone' in vals or 'mobile' in vals or 'image' in vals \\\n or 'residence_district' in vals or 'residence_department_id' in vals or 'residence_country_id' in vals or 'residence_address' in vals:\n for data in self:\n if data.partner_id:\n partner_vals = data._get_related_partner_vals(vals)\n data.partner_id.write(partner_vals)\n return res\n\n# vim:expandtab:smartindent:tabstop=2:softtabstop=2:shiftwidth=2:\n","repo_name":"Capriatto/addons-v11","sub_path":"clinica_doctor_data/models/doctor_details.py","file_name":"doctor_details.py","file_ext":"py","file_size_in_byte":17233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"34555562268","text":"#!/usr/bin/python\nimport json\nimport yaml\nimport subprocess\nimport os\nimport sys\n\n\nlab_name = os.getenv('lab_name')\nlocalstack_ip = sys.argv[1]\nprint(\"localstack_ip=\", localstack_ip)\nwd = os.getenv('agent_root_dir') + os.getenv('labs_path') + lab_name\nos.chdir(wd)\njson_file = subprocess.check_output(['terraform', 'output', '-json'])\ntf_output = json.loads(json_file)\n\n# Read all needed info from terraform output\nmcn_mgmt_ip = tf_output['mcn']['value']['mgmt_ip']\nmcn_name = tf_output['mcn']['value']['name']\nbranch_mgmt_ip = tf_output['branch']['value']['mgmt_ip']\nbranch_name = tf_output['branch']['value']['name']\nmcn_host_mgmt_ip = tf_output['mcn-host1']['value']['mgmt_ip']\nbranch_host_mgmt_ip = tf_output['branch-host1']['value']['mgmt_ip']\n\nwith open(os.getenv('agent_root_dir') + os.getenv('openstack_path') +'/testbed_template.yaml') as testbed_file:\n 
testbed_description = yaml.load(testbed_file, Loader=yaml.FullLoader)\n\"\"\"\nExample for testbed yaml:\nSTATE: AVAILABLE\nENV:\n ORCHESTRATOR:\n name: placeholder\n ip: placeholder_only_valid_for_localstack\n CUSTOMER:\n name: placeholder\nSITES:\n BRANCH1:\n basic_settings:\n mode: client\n model: cbvpx\n site_name: BRANCH1_KVMVPX\n vm_ip: placeholder\n MCN:\n basic_settings:\n mode: primary_mcn\n model: cbvpx\n site_name: MCN_KVMVPX\n vm_ip: placeholder\nCLIENT:\n ip: placeholder\n username: placeholder\n password: placeholder\nSERVER:\n ip: placeholder\n username: placeholder\n password: placeholder\n \n\"\"\"\ntestbed_description[\"ENV\"][\"ORCHESTRATOR\"][\"name\"] = \"localstack\"\ntestbed_description[\"ENV\"][\"ORCHESTRATOR\"][\"ip\"] = localstack_ip\ntestbed_description[\"ENV\"][\"CUSTOMER\"][\"name\"] = lab_name\n\ntestbed_description[\"SITES\"][\"BRANCH1\"][\"basic_settings\"][\"site_name\"] = branch_name\ntestbed_description[\"SITES\"][\"BRANCH1\"][\"vm_ip\"] = branch_mgmt_ip\n\ntestbed_description[\"SITES\"][\"MCN\"][\"basic_settings\"][\"site_name\"] = mcn_name\ntestbed_description[\"SITES\"][\"MCN\"][\"vm_ip\"] = mcn_mgmt_ip\n\ntestbed_description[\"CLIENT\"][\"ip\"] = tf_output['mcn-host1']['value']['mgmt_ip']\ntestbed_description[\"SERVER\"][\"ip\"] = tf_output['branch-host1']['value']['mgmt_ip']\n\nprint(\"Testbed descriptor yaml:\")\nprint(testbed_description)\n\nwith open('edge_config.yaml', 'w') as f:\n yaml.safe_dump(testbed_description, f, allow_unicode=True, default_flow_style=False)\n\nsys.exit(0)\n\n\n","repo_name":"gioulis/jenkins","sub_path":"scripts/generate_testbed_yaml.py","file_name":"generate_testbed_yaml.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"14210691380","text":"def solution(s):\n if len(s) == 4 or len(s) == 6:\n num = [str(i) for i in range(10)]\n s = list(s)\n S = s.copy()\n for i in s:\n if i in num :\n S.remove(i)\n return True if len(S) == 0 else False\n\n else: return False\n\n ","repo_name":"lizardnote/coding_test","sub_path":"프로그래머스/lv1/12918. 
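The testbed script above loads a YAML template, patches a few keys, and writes the result back with yaml.safe_dump. The same round-trip, reduced to an in-memory sketch (the file name and keys are illustrative):

import yaml

template = yaml.safe_load("ENV:\n  ORCHESTRATOR:\n    ip: placeholder\n")
template['ENV']['ORCHESTRATOR']['ip'] = '10.0.0.1'

with open('testbed.yaml', 'w') as f:
    yaml.safe_dump(template, f, default_flow_style=False)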
문자열 다루기 기본/문자열 다루기 기본.py","file_name":"문자열 다루기 기본.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"467785348","text":"from django.core.management.base import BaseCommand, CommandError\nimport os\nimport shutil\n\nfrom ._tools import get_p4a_args, update_apk, parcefiles, overwrite_p4a, clean_build, read_configuration\n\nclass Command(BaseCommand):\n help = 'Create a build for new project'\n can_import_settings = True\n\n\n #----------------------------------------------------------------------\n def add_arguments(self, parser):\n \"\"\"\"\"\"\n\n parser.add_argument(\n '--clean',\n action='store_true',\n dest='clean',\n default=True,\n help='Delete all build components',\n )\n\n #----------------------------------------------------------------------\n def handle(self, *args, **options):\n from django.conf import settings\n\n update_apk(settings)\n\n build_dir = os.path.join(settings.ANDROID['BUILD']['build'], os.path.split(settings.BASE_DIR)[-1])\n os.chdir(build_dir)\n\n if options['clean']:\n clean_build(settings)\n\n argv = read_configuration(settings)\n #host_python = \"python{}.{}\".format(*platform.python_version_tuple()[:2])\n os.system('p4a create {}'.format(argv))\n overwrite_p4a(settings)\n\n","repo_name":"okkype/PillarPOS","sub_path":"linux_x86/lib/python3.4/site-packages/djangoforandroid/builder/management/commands/androidcreate.py","file_name":"androidcreate.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"12606138744","text":"from rovecomm import*\nimport time\nimport struct\nimport socket\nimport sys\nfrom contextlib import contextmanager\nfrom io import StringIO\nfrom unittest.mock import Mock\n\nthis = sys.modules[__name__]\nsys.path.insert(0, \"../\")\n\n# Declaring the our rovecomm instance\nthis.rovecomm_node = RoveComm(11000, (\"127.0.0.1\", 11111))\n\n# Dict of packets, each test will use a different data_id so they don't interfere\nglobal responses\nresponses = {}\n\n\ndef test_udp():\n global responses\n this.rovecomm_node.set_callback(4242, handle_packet)\n\n packet = RoveCommPacket(4242, \"b\", (1, 3), \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 1\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[4242].data == packet.data\n assert responses[4242].data_type == packet.data_type\n assert responses[4242].data_count == packet.data_count\n assert responses[4242].data_id == packet.data_id\n\n\ndef test_tcp():\n global responses\n this.rovecomm_node.set_callback(4241, handle_packet)\n\n # The RoveCommTcp class includes both a server socket and\n # a dictionary of connection sockets, allowing it to create\n # a TCP connection between its server and one of the dictionary\n # sockets\n packet = RoveCommPacket(4241, \"b\", (1, 4), \"127.0.0.1\", 11111)\n assert this.rovecomm_node.write(packet, True) == 1\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[4241].data == packet.data\n assert responses[4241].data_type == packet.data_type\n assert responses[4241].data_count == packet.data_count\n assert responses[4241].data_id == packet.data_id\n\n\ndef test_tcp_subscribers():\n global responses\n this.rovecomm_node.set_callback(4243, handle_packet)\n\n # The RoveCommTcp class includes both a server socket and\n # a dictionary of connection sockets, 
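One detail worth flagging in the androidcreate.py record above: --clean combines action='store_true' with default=True, so options['clean'] is truthy whether or not the flag is passed and the clean branch always runs. With argparse, which Django's command parser builds on, the conventional pattern keeps the default at False:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--clean', action='store_true', default=False,
                    help='Delete all build components')

assert parser.parse_args([]).clean is False
assert parser.parse_args(['--clean']).clean is True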
allowing it to create\n # a TCP connection between its server and one of the dictionary\n # sockets\n packet = RoveCommPacket(4243, \"b\", (1, 4), \"127.0.0.1\", 11111)\n\n this.rovecomm_node.tcp_node.connect((\"127.0.0.1\", 11111))\n\n assert this.rovecomm_node.write(packet, True) == 1\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[4243].data == packet.data\n assert responses[4243].data_type == packet.data_type\n assert responses[4243].data_count == packet.data_count\n assert responses[4243].data_id == packet.data_id\n\n\ndef test_udp_external():\n global responses\n this.rovecomm_node.set_callback(4240, handle_packet)\n\n # Test socket to try to send to RoveComm over UDP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind((\"127.0.0.1\", 11001))\n\n rovecomm_packet = struct.pack(ROVECOMM_HEADER_FORMAT, ROVECOMM_VERSION, 4240, 2, 0)\n data = (1, 6)\n for i in data:\n rovecomm_packet = rovecomm_packet + struct.pack(\">b\", i)\n\n s.sendto(rovecomm_packet, (\"127.0.0.1\", 11000))\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[4240].data == data\n assert responses[4240].data_type == \"b\"\n assert responses[4240].data_count == len(data)\n assert responses[4240].data_id == 4240\n s.close()\n\n\ndef test_tcp_external():\n global responses\n this.rovecomm_node.set_callback(4239, handle_packet)\n\n # Test socket to try to send to RoveComm over TCP\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((\"127.0.0.1\", 11111))\n rovecomm_packet = struct.pack(ROVECOMM_HEADER_FORMAT, ROVECOMM_VERSION, 4239, 2, 0)\n data = (1, 5)\n for i in data:\n rovecomm_packet = rovecomm_packet + struct.pack(\">b\", i)\n\n s.send(rovecomm_packet)\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[4239].data == data\n assert responses[4239].data_type == \"b\"\n assert responses[4239].data_count == len(data)\n assert responses[4239].data_id == 4239\n\n\ndef test_invalid_target():\n # Sends to a port that won't be available\n packet = RoveCommPacket(4238, \"b\", (0,), \"127.0.0.1\", 99999)\n assert this.rovecomm_node.write(packet, True) == 0\n assert this.rovecomm_node.write(packet, False) == 0\n\n\ndef test_callback_exception():\n global response\n this.rovecomm_node.set_callback(4237, handle_packet_exception)\n\n packet = RoveCommPacket(4237, \"b\", (1, 3), \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 1\n\n # RoveComm passes on callback exception, nothing else to do\n\n\ndef test_print_packet():\n packet = RoveCommPacket(4236, \"b\", (1, 4), \"127.0.0.1\", 11111)\n # Temporarily changes stdout to write into an easily accessible output\n with captured_output() as (out, err):\n packet.print()\n output = out.getvalue().strip()\n\n assert (\n output\n == \"\"\"----------\nID: 4236\nType: b\nCount: 2\nIP: ('127.0.0.1', 11111)\nData: (1, 4)\n----------\"\"\"\n )\n\n\ndef test_invalid_write_udp():\n # Sends non-tuple data\n packet = RoveCommPacket(4235, \"b\", \"a\", \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 0\n\n\ndef test_invalid_write_tcp():\n # Sends non-tuple data\n packet = RoveCommPacket(4235, \"b\", \"a\", \"127.0.0.1\", 11111)\n assert this.rovecomm_node.write(packet, True) == 0\n\n\ndef test_udp_subscribe():\n assert this.rovecomm_node.udp_node.subscribe(\"0\") == 1\n # Couldn't test receipt of packet because it sends to external IP\n\n global responses\n 
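The tests above forge raw packets with struct.pack(ROVECOMM_HEADER_FORMAT, version, data_id, count, type); the format constant lives in the rovecomm module and is not shown in this record. A standalone sketch of the same pack/unpack idiom, using an assumed big-endian layout (the format string below is a stand-in, not the library's real constant):

import struct

HEADER_FORMAT = '>BHBB'  # assumed layout: version, data_id, count, type

header = struct.pack(HEADER_FORMAT, 2, 4240, 2, 0)
payload = b''.join(struct.pack('>b', v) for v in (1, 6))

version, data_id, count, type_code = struct.unpack(HEADER_FORMAT, header)
values = struct.unpack('>{}b'.format(count), payload)
assert (data_id, values) == (4240, (1, 6))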
this.rovecomm_node.set_callback(3, handle_packet)\n\n packet = RoveCommPacket(3, \"b\", (), \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 1\n\n time.sleep(0.05)\n assert responses[3].data_id == packet.data_id\n assert len(this.rovecomm_node.udp_node.subscribers) == 1\n\n # Main target should be invalid\n this.rovecomm_node.set_callback(4234, handle_packet)\n packet2 = RoveCommPacket(4234, \"b\", (), \"\", 0)\n assert this.rovecomm_node.write(packet2, False) == 1\n\n # Packet should still be recieved because we're subscribed\n time.sleep(0.05)\n assert responses[4234].data == packet2.data\n assert responses[4234].data_type == packet2.data_type\n assert responses[4234].data_count == packet2.data_count\n assert responses[4234].data_id == packet2.data_id\n\n\ndef test_udp_unsubscribe():\n global responses\n this.rovecomm_node.set_callback(4, handle_packet)\n\n packet = RoveCommPacket(4, \"b\", (), \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 1\n\n time.sleep(0.05)\n assert responses[4].data_id == packet.data_id\n assert len(this.rovecomm_node.udp_node.subscribers) == 0\n\n # Main target should be invalid\n this.rovecomm_node.set_callback(4233, handle_packet)\n packet2 = RoveCommPacket(4233, \"b\", (), \"\", 0)\n assert this.rovecomm_node.write(packet2, False) == 1\n\n # Packet should not still be recieved because we unsubscribed\n time.sleep(0.05)\n assert responses.get(4233) is None\n\n\ndef test_set_default_callback():\n global responses\n this.rovecomm_node.set_default_callback(handle_packet)\n\n packet = RoveCommPacket(1212, \"b\", (1, 3), \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 1\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[1212].data == packet.data\n assert responses[1212].data_type == packet.data_type\n assert responses[1212].data_count == packet.data_count\n assert responses[1212].data_id == packet.data_id\n\n\ndef test_clear_default_callback():\n global responses\n this.rovecomm_node.clear_default_callback()\n\n packet = RoveCommPacket(1515, \"b\", (1, 3), \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 1\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert 1515 not in responses\n\n\ndef test_clear_callback():\n global responses\n this.rovecomm_node.set_callback(2121, handle_packet)\n this.rovecomm_node.clear_callback(2121)\n\n packet = RoveCommPacket(2121, \"b\", (1, 3), \"127.0.0.1\", 11000)\n assert this.rovecomm_node.write(packet, False) == 1\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert 2121 not in responses\n\n\ndef test_invalid_rovecomm_version_tcp():\n global responses\n # 5 is the data id for the invalid version return packet\n this.rovecomm_node.set_callback(5, handle_packet)\n\n # Test socket to try to send to RoveComm over TCP\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((\"127.0.0.1\", 11111))\n rovecomm_packet = struct.pack(ROVECOMM_HEADER_FORMAT, 1, 4232, 2, 0)\n data = (1, 7)\n for i in data:\n rovecomm_packet = rovecomm_packet + struct.pack(\">b\", i)\n\n s.send(rovecomm_packet)\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[5].data == (1,)\n assert responses[5].data_type == \"b\"\n assert responses[5].data_count == 1\n assert responses[5].data_id == 5\n\n\ndef test_invalid_rovecomm_version_udp():\n global responses\n responses.pop(5, None)\n # 5 is the 
data id for the invalid version return packet\n this.rovecomm_node.set_callback(5, handle_packet)\n\n # Test socket to try to send to RoveComm over UDP\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind((\"127.0.0.1\", 11001))\n\n rovecomm_packet = struct.pack(ROVECOMM_HEADER_FORMAT, 1, 4231, 2, 0)\n data = (1, 6)\n for i in data:\n rovecomm_packet = rovecomm_packet + struct.pack(\">b\", i)\n\n s.sendto(rovecomm_packet, (\"127.0.0.1\", 11000))\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n assert responses[5].data == (1,)\n assert responses[5].data_type == \"b\"\n assert responses[5].data_count == 1\n assert responses[5].data_id == 5\n s.close()\n\n\ndef test_read_exception_udp():\n global responses\n # 0 is the data id for the default packet, sent when exception is raised\n this.rovecomm_node.set_callback(0, handle_packet)\n\n # A mock socket to throw an exception when a read occurs\n mock = Mock()\n mock.recvfrom.side_effect = Exception\n temp = this.rovecomm_node.udp_node.RoveCommSocket\n mock.fileno.return_value = temp.fileno()\n this.rovecomm_node.udp_node.RoveCommSocket = mock\n\n # Send a packet so recv can be triggered\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind((\"127.0.0.1\", 11001))\n\n rovecomm_packet = struct.pack(ROVECOMM_HEADER_FORMAT, ROVECOMM_VERSION, 4230, 2, 0)\n data = (1, 6)\n for i in data:\n rovecomm_packet = rovecomm_packet + struct.pack(\">b\", i)\n\n s.sendto(rovecomm_packet, (\"127.0.0.1\", 11000))\n\n # Give the listener thread a moment to catch the packet\n time.sleep(0.05)\n # Match the packet to the blank packet\n assert responses[0].data == ()\n assert responses[0].data_type == \"b\"\n assert responses[0].data_count == 0\n assert responses[0].data_id == 0\n\n # Gives the udp node its socket back\n this.rovecomm_node.udp_node.RoveCommSocket = temp\n s.close()\n\n\ndef test_listener_shutdown():\n # Store data to recreate the instance afterwards\n callbacks = this.rovecomm_node.callbacks\n udp_port = this.rovecomm_node.udp_node.rove_comm_port\n tcp_addr = this.rovecomm_node.tcp_node.server.getsockname()\n\n # Run the test\n assert this.rovecomm_node.thread.is_alive()\n this.rovecomm_node.close_thread()\n assert not this.rovecomm_node.thread.is_alive()\n\n # Reopen the thread to avoid any problems with other tests\n this.rovecomm_node = RoveComm(udp_port, tcp_addr)\n this.rovecomm_node.callbacks = callbacks\n assert this.rovecomm_node.thread.is_alive()\n\ndef test_get_manifest():\n # Make sure we can load the manifest and it returns an actual value\n manifest = get_manifest()\n assert manifest != None\n\n\n\ndef handle_packet(packet):\n global responses\n responses[packet.data_id] = packet\n\n\ndef handle_packet_exception(packet):\n raise Exception\n\n\n# Redirects sys.stdout to be captured more easily for testing\n@contextmanager\ndef captured_output():\n new_out, new_err = StringIO(), StringIO()\n old_out, old_err = sys.stdout, sys.stderr\n try:\n sys.stdout, sys.stderr = new_out, new_err\n yield sys.stdout, sys.stderr\n finally:\n sys.stdout, sys.stderr = old_out, old_err\n","repo_name":"MissouriMRDT/RoveComm_Python","sub_path":"tests/rovecomm_test.py","file_name":"rovecomm_test.py","file_ext":"py","file_size_in_byte":12371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"2143289593","text":"from ..func import Func, TestData\nfrom ....errors import SimMemoryError\n\n\nclass malloc(Func):\n def __init__(self):\n super().__init__() # 
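The captured_output helper defined near the end of the test module swaps sys.stdout and sys.stderr by hand. For stdout alone, the standard library's contextlib.redirect_stdout does the same job (and contextlib.redirect_stderr covers the other stream):

import contextlib
import io

buf = io.StringIO()
with contextlib.redirect_stdout(buf):
    print('hello')

assert buf.getvalue() == 'hello\n'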
pylint disable=useless-super-delegation\n\n def num_args(self):\n return 1\n\n def get_name(self):\n return \"malloc\"\n\n def gen_input_output_pair(self):\n return None\n\n def pre_test(self, func, runner):\n # we should not get a real output from the function with a value this large\n num = 0xFFFF0000\n test_input = [num]\n test_output = [None]\n return_val = None\n max_steps = 10\n test = TestData(test_input, test_output, return_val, max_steps)\n state = runner.get_out_state(func, test, concrete_rand=True)\n if state is not None and 0x10 < state.solver.eval(state.regs.eax) < 0xFFFFFFF0:\n return False\n\n # we should be able to get different outputs if we call malloc multiple times\n num = 0x80\n test_input = [num]\n test_output = [None]\n return_val = None\n\n max_steps = 40\n test = TestData(test_input, test_output, return_val, max_steps)\n returned_locs = []\n state = runner.get_out_state(func, test, concrete_rand=True)\n if state is None:\n return False\n returned_locs.append(state.solver.eval(state.regs.eax))\n\n for i in range(6): # pylint disable=unused-variable\n state = runner.get_out_state(func, test, initial_state=state, concrete_rand=True)\n if state is None:\n return False\n returned_locs.append(state.solver.eval(state.regs.eax))\n if any(a < 0x3000 for a in returned_locs):\n return False\n\n # if we got the same value 2x it didnt work\n if len(set(returned_locs)) != len(returned_locs):\n return False\n\n # if we got 0 it didn't work\n if any(a == 0 for a in returned_locs):\n return False\n\n # if they are all multiples of 0x1000 it seems to be always calling allocate\n if all(a % 0x1000 == returned_locs[0] % 0x1000 for a in returned_locs):\n return False\n\n # they all should be writable/readable\n try:\n if any(state.solver.eval(state.memory.permissions(a)) & 3 != 3 for a in returned_locs):\n return False\n except SimMemoryError:\n return False\n\n # we should be able to call malloc 0xf00 afterwards\n num = 0xF00\n test_input = [num]\n test_output = [None]\n return_val = None\n\n max_steps = 40\n test = TestData(test_input, test_output, return_val, max_steps)\n returned_locs = []\n state = runner.get_out_state(func, test, initial_state=state, concrete_rand=True)\n\n if state is None:\n return False\n\n res = state.solver.eval(state.regs.eax)\n if res < 0x10 or res > 0xFFFFFFF0:\n return False\n\n # we should get different values if we try with a different size\n num = 0x320\n test_input = [num]\n test_output = [None]\n return_val = None\n max_steps = 10\n test = TestData(test_input, test_output, return_val, max_steps)\n returned_locs2 = []\n state = runner.get_out_state(func, test, concrete_rand=True)\n if state is None:\n return False\n returned_locs2.append(state.solver.eval(state.regs.eax))\n\n for i in range(10):\n state = runner.get_out_state(func, test, initial_state=state, concrete_rand=True)\n if state is None:\n return False\n returned_locs2.append(state.solver.eval(state.regs.eax))\n if any(a < 0x3000 for a in returned_locs2):\n return False\n\n if returned_locs == returned_locs2:\n return False\n\n return True\n","repo_name":"angr/angr","sub_path":"angr/analyses/identifier/functions/malloc.py","file_name":"malloc.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","stars":6898,"dataset":"github-code","pt":"89"} +{"seq_id":"19667703472","text":"from joblib import load\nfrom preprocessing.cleaning_data import preprocess\n\n\ninputdict = {\n \"0\": {\n \"location\": 1050,\n \"type\": \"APARTMENT\",\n \"room_number\": 
1,\n \"area\": 50,\n \"kitchen_equipped\": 0,\n \"furnished\": 0,\n \"fireplace\": 0,\n \"terrace\": 0,\n \"terrace_area\": 0,\n \"garden\": 0,\n \"garden_area\": 0,\n \"land_surface\": 0,\n \"facade_count\": 2,\n \"swimming_pool\": 0,\n \"building_condition\": \"GOOD\"\n }\n}\n\n\ndef predict():\n estim = load('model/model.joblib')\n ds = preprocess(input_dict=inputdict)\n predicted_price = round(estim.predict(ds)[0] / 1e3, 0) * 1e3 # the price will be a multiple of 1000€\n\n return predicted_price\n\n\n'''\nprint(predict())\n'''\n","repo_name":"lyesds/challenge-api-deployment","sub_path":"predict/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"35324786514","text":"import cv2\nfrom socket import *\n\ncordinate_x = [84,\n 162,\n 240,\n 318,\n 396,\n 474,\n 552,\n 630,\n 708\n ]\ncordinate_y = [48,\n 126,\n 204,\n 282,\n 360,\n 440,\n 518,\n 596,\n 674,\n 752\n ]\n\nimg = cv2.imread('pad_800.jpg')\nR_shuai = cv2.imread('R_shuai.png')\nR_bin = cv2.imread('R_bin.png')\nR_ju = cv2.imread('R_ju.png')\nR_ma = cv2.imread('R_ma.png')\nR_pao = cv2.imread('R_pao.png')\nR_shi = cv2.imread('R_shi.png')\nR_xiang = cv2.imread('R_xiang.png')\n\nB_jiang = cv2.imread('B_jiang.png')\nB_zu = cv2.imread('B_zu.png')\nB_ju = cv2.imread('B_ju.png')\nB_ma = cv2.imread('B_ma.png')\nB_pao = cv2.imread('B_pao.png')\nB_shi = cv2.imread('B_shi.png')\nB_xiang = cv2.imread('B_xiang.png')\n\nR_shuai_s = cv2.imread('R_shuai_s.png')\nR_bin_s = cv2.imread('R_bin_s.png')\nR_ju_s = cv2.imread('R_ju_s.png')\nR_ma_s = cv2.imread('R_ma_s.png')\nR_pao_s = cv2.imread('R_pao_s.png')\nR_shi_s = cv2.imread('R_shi_s.png')\nR_xiang_s = cv2.imread('R_xiang_s.png')\n\nB_jiang_s = cv2.imread('B_jiang_s.png')\nB_zu_s = cv2.imread('B_zu_s.png')\nB_ju_s = cv2.imread('B_ju_s.png')\nB_ma_s = cv2.imread('B_ma_s.png')\nB_pao_s = cv2.imread('B_pao_s.png')\nB_shi_s = cv2.imread('B_shi_s.png')\nB_xiang_s = cv2.imread('B_xiang_s.png')\n\nR_pieces_cordinate = [\n [4, 9],\n [0, 6],\n [2, 6],\n [4, 6],\n [6, 6],\n [8, 6],\n [0, 9],\n [8, 9],\n [1, 9],\n [7, 9],\n [1, 7],\n [7, 7],\n [3, 9],\n [5, 9],\n [2, 9],\n [6, 9]\n]\n\nB_pieces_cordinate = [\n [4, 0],\n [0, 3],\n [2, 3],\n [4, 3],\n [6, 3],\n [8, 3],\n [0, 0],\n [8, 0],\n [1, 0],\n [7, 0],\n [1, 2],\n [7, 2],\n [3, 0],\n [5, 0],\n [2, 0],\n [6, 0]\n]\n\n# 1将2卒3车4马5炮6士7相\n\ntp = [\n 1,\n 2,\n 2,\n 2,\n 2,\n 2,\n 3,\n 3,\n 4,\n 4,\n 5,\n 5,\n 6,\n 6,\n 7,\n 7\n]\n\nmove_1 = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n\nmove_2 = [[1, 0], [-1, 0], [0, -1]]\n\nmove_4 = [[1, 2], [2, 1], [-1, 2], [2, -1], [1, -2], [-2, 1], [-1, -2], [-2, -1]]\n\nunaccess_ma = [[0,1],[1,0],[0,1],[1,0],[0,-1],[-1,0],[0,-1],[-1,0]]\n\nmove_6 = [[-1, -1], [1, -1], [1, 1], [-1, 1]]\n\nR_pieces = [R_shuai,\n R_bin,\n R_bin,\n R_bin,\n R_bin,\n R_bin,\n R_ju,\n R_ju,\n R_ma,\n R_ma,\n R_pao,\n R_pao,\n R_shi,\n R_shi,\n R_xiang,\n R_xiang\n ]\n\nB_pieces = [B_jiang,\n B_zu,\n B_zu,\n B_zu,\n B_zu,\n B_zu,\n B_ju,\n B_ju,\n B_ma,\n B_ma,\n B_pao,\n B_pao,\n B_shi,\n B_shi,\n B_xiang,\n B_xiang,\n ]\nR_pieces_s = [R_shuai_s,\n R_bin_s,\n R_bin_s,\n R_bin_s,\n R_bin_s,\n R_bin_s,\n R_ju_s,\n R_ju_s,\n R_ma_s,\n R_ma_s,\n R_pao_s,\n R_pao_s,\n R_shi_s,\n R_shi_s,\n R_xiang_s,\n R_xiang_s\n ]\n\nB_pieces_s = [B_jiang_s,\n B_zu_s,\n B_zu_s,\n B_zu_s,\n B_zu_s,\n B_zu_s,\n B_ju_s,\n B_ju_s,\n B_ma_s,\n B_ma_s,\n B_pao_s,\n B_pao_s,\n B_shi_s,\n B_shi_s,\n B_xiang_s,\n B_xiang_s\n ]\n\nactive_R = 
[True] * 16\nactive_B = [True] * 16\n\nblock_size = 16\n\nblock = cv2.imread('block.png')\nblock = cv2.resize(block, (block_size, block_size))\n\nw = 50\n\nh = 50\n\ndestination = []\nsel = [0]\n\nstate = [True] * 2\n\nPORT = 50000\n\nBUFLEN = 512\n\nIP = '106.54.224.13'\n\n\nclass sock:\n dataSocket = socket(AF_INET, SOCK_STREAM)\n\n","repo_name":"jiangjie217527/ChineseChessOL","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"14793980133","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File : File09.py\n# Author: roohom\n# Date : 2018/10/21 0021\n\n\n# 将结构化数据写入文件并作相关的处理\n\n\nimport struct\n\nf = open(\"FILE09.dat\", \"wb\")\nn = input(\"请输入学生人数:\")\ns = struct.pack(\"i\", int(n))\nm = s.decode(\"utf-8\")\nf.write(bytes(m, \"utf-8\"))\n\ns2 = \"学号 姓名 C语言从学习到转行 C++从入门到放弃 MySQL从删库到跑路\"\nf.write(s2.encode(\"utf-8\"))\ni = 0\n\nwhile i < int(n):\n num = input(\"请输入第\" + str(i+1) + \"人的学号(2位):\")\n name = input(\"请输入姓名:\")\n a1 = input(\"请输入C语言从工作到转行成绩:\")\n a2 = input(\"请输入从C++从入门到放弃成绩:\")\n a3 = input(\"请输入MySQL从删库到跑路成绩:\")\n\n s = num + name\n s1 = s + str(int(a1)) + str(int(a2)) + str(int(a3))\n\n f.write(s1.encode(\"utf-8\"))\n i += 1\nf.close()\n","repo_name":"roohom/FileAndIO","sub_path":"practices/File09.py","file_name":"File09.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"8649186401","text":"'''\n Works both for python2 and python3.\n\n Task : \"Time Is All I Have\"\n Basic Algorithm:\n 1. Read necessary lines as string(char array)\n 2. Extract the time in that line as seconds and store it in a list.\n 3. Evaluate the lists to get the required results.\n\n Functions 'extractTimeExtensive' and 'extractTime' have the same purpose.\n However, the former is slower but safer as trailing spaces wouldn't cause an issue.\n\n Pre-defined function round is being used,\n due to inaccuracy of float (trailing decimal places might not be zero).\n'''\n\ndef extractTimeExtensive(line, list):\n minutes = 0\n milliseconds = 0\n\n i = 3\n min = True\n while True:\n i += 1\n if line[i] == 's':\n break\n elif line[i] == '.' 
or line[i] == ' ':\n continue\n elif line[i] == 'm':\n min = False\n elif min:\n minutes *= 10\n minutes += float(line[i])\n else:\n milliseconds *= 10\n milliseconds += float(line[i])\n\n seconds = (minutes * 60) + (milliseconds / 1000)\n list.append(round(seconds, 3))\n\ndef extractTime(line, list):\n seconds = 0\n factor = 0.001\n i = -2\n while True:\n i -= 1\n if line[i] == 'm':\n break\n elif line[i] == '.':\n continue\n else:\n seconds += float(line[i]) * factor\n factor *= 10\n\n factor = 60\n while True:\n i -= 1\n if line[i] == ' ':\n break\n else:\n seconds += float(line[i]) * factor\n factor *= 10\n list.append(round(seconds, 3))\n\ndef analyzeTime(list):\n num = len(list)\n total = 0.0\n mean = 0.0\n variance_numerator = 0.0\n std_dev = 0.0\n\n for i in range(len(list)):\n total += list[i]\n\n mean = total / num\n mean = round(mean, 3)\n\n for i in range(len(list)):\n variance_numerator += (list[i] - mean)**2\n\n std_dev = (variance_numerator / num)**(0.5)\n std_dev = round(std_dev, 3)\n\n count = 0\n for i in range(len(list)):\n if (list[i] >= mean - std_dev) and (list[i] <= mean + std_dev):\n count += 1\n\n return (mean, std_dev, count)\n\ndef main():\n real = []\n user = []\n sys = []\n\n with open('timestat.txt', 'r') as timestat_data:\n for line in timestat_data:\n if line[0] == 'r':\n extractTimeExtensive(line, real)\n elif line[0] == 'u':\n extractTimeExtensive(line, user)\n elif line[0] == 's':\n extractTimeExtensive(line, sys)\n\n no_of_runs = len(real)\n\n real = analyzeTime(real)\n user = analyzeTime(user)\n sys = analyzeTime(sys)\n\n print(\"Total number of runs: \" + str(no_of_runs))\n print(\"\\nAverage Time Statistics:\")\n print(\"real \" + str(real[0]) + \"s\")\n print(\"user \" + str(user[0]) + \"s\")\n print(\"sys \" + str(sys[0]) + \"s\")\n print(\"\\nStandard deviation of Time statistics:\")\n print(\"real \" + str(real[1]) + \"s\")\n print(\"user \" + str(user[1]) + \"s\")\n print(\"sys \" + str(sys[1]) + \"s\")\n print(\"\\nNumber of runs within average - standard deviation to average + standard deviation:\")\n print(\"real \" + str(real[2]))\n print(\"user \" + str(user[2]))\n print(\"sys \" + str(sys[2]))\n\nif __name__ == '__main__':\n main()\n","repo_name":"DEVANSH-DVJ/WnCC-Assignment","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":3328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"35697838411","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 4 09:34:07 2019\r\n\r\n@author: jon\r\n\"\"\"\r\n\r\n# method 1\r\ng = (x*x for x in range(1,10))\r\nfor n in g:\r\n print(n)\r\n\r\n# method 2\r\ndef fib(max):\r\n n, a, b = 0, 0, 1\r\n while n < max:\r\n yield b\r\n a, b = b, a + b\r\n n = n + 1\r\n return 'done'\r\n\r\nf = fib(6)\r\nfor x in f:\r\n print(x)\r\n\r\n# or do it like this\r\nfor x in fib(6):\r\n print(x)\r\n \r\n# or to get return value, we should catch the exception\r\nz = fib(6)\r\nwhile True:\r\n try:\r\n j = next(z)\r\n print(\"z:\", j)\r\n except StopIteration as e:\r\n print('Generator return value:', e.value)\r\n break\r\n \r\n# pratice 杨辉三角\r\ndef triangles():\r\n a = [1]\r\n while True:\r\n yield a\r\n a = [1] + [a[i] + a[i+1] for i in range(len(a)-1)] + [1]\r\n \r\nt = triangles()\r\nfor z in range(10):\r\n print(next(t))\r\n\r\n 
\r\n","repo_name":"ounie-os/py_practice","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"31514479528","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# 两个N位的二进制数相加\n\"\"\"\n Topic: sample\n Desc : 两个N位的二进制数相加\n\"\"\"\n__author__ = 'Xiong Neng'\n\n\ndef biAdd(a, b):\n res = []\n m = 0\n r = list(range(0, len(a)))\n r.reverse()\n for i in r:\n m, n = divmod(a[i] + b[i] + m, 2)\n res.insert(0, n)\n res.insert(0, m)\n return res\n\n\nif __name__ == '__main__':\n print(biAdd([0, 1, 0, 1, 1, 0], [1, 1, 0, 1, 1, 0]))\n","repo_name":"yidao620c/core-algorithm","sub_path":"algorithms/c13_sample/m04_binary_add.py","file_name":"m04_binary_add.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":762,"dataset":"github-code","pt":"89"} +{"seq_id":"42925489120","text":"from itertools import permutations\n\ndef calc(num1, num2, oper):\n if oper == '+':\n return num1 + num2\n elif oper == '-':\n return num1 - num2\n else:\n return num1 * num2\n\ndef solution(expression):\n # 최대상금 초기화\n max_reward = 0\n\n # expression > list 작업\n expression = expression.replace('+', ' + ') \\\n .replace('-', ' - ') \\\n .replace('*', ' * ').split()\n\n # permutaion - 모든 우선순위 고려\n # 모든 priority 에 대한 상금을 확인 & 최대값 갱신\n\n for perm in permutations(('+', '-', '*'), 3):\n priority = {}\n for i in range(3):\n priority[perm[i]] = i\n\n # 우선순위 바탕으로 계산결과 구하기\n stack = [] # 숫자를 임시적으로 저장\n opers = [] # 연산자를 //\n\n for elem in expression:\n # 해당 원소가 연산자인 경우\n if elem in priority:\n while opers and priority[elem] <= priority[opers[-1]]:\n n2 = stack.pop()\n n1 = stack.pop()\n tmp = calc(n1, n2, opers.pop())\n stack.append(tmp)\n opers.append(elem)\n # 숫자인 경우\n else:\n stack.append(int(elem))\n\n # 아직 남은 숫자들을 모두 연산해준다.\n while opers:\n n2 = stack.pop()\n n1 = stack.pop()\n tmp = calc(n1, n2, opers.pop())\n stack.append(tmp)\n\n # 최대상금 갱신\n now_reward = abs(stack[0])\n if now_reward > max_reward:\n max_reward = now_reward\n\n return max_reward","repo_name":"sungu1516/algorithm-study","sub_path":"programmers/Level2/수식최대화.py","file_name":"수식최대화.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"43534322120","text":"from trytond.model import ModelView, ModelSQL, fields\n\n__all__ = ['TableA', 'TableB', 'TableC', 'TableD',]\n\nclass TableA(ModelSQL, ModelView):\n u'Présence d\\'espaces protégés et gérés par commune'\n __name__ = 'portrait.tablea'\n\n cd_insee = fields.Many2One(\n 'portrait.commune',\n ondelete='CASCADE',\n string = u'CD_INSEE',\n help=u'Code INSEE de la commune',\n required=True,\n ) \n id_mnhn = fields.Many2One(\n 'protection.area',\n ondelete='CASCADE',\n string = u'ID_MNHN',\n help=u'Code du site',\n required=True,\n ) \n nom = fields.Char(\n string='NOM',\n help=u'Nom du site',\n )\n lb_mpro = fields.Char( \n string=u'LB_MPRO',\n help=u'Type d\\'espace',\n )\n\nclass TableB(ModelSQL, ModelView):\n u'Présence de ZNIEFF par commune'\n __name__ = 'portrait.tableb'\n\n cd_insee = fields.Many2One(\n 'portrait.commune',\n ondelete='CASCADE',\n string = u'CD_INSEE',\n help=u'Code INSEE de la commune',\n required=True,\n ) \n nm_sffzn = fields.Many2One(\n 'protection.area',\n ondelete='CASCADE',\n string = u'NM_SFFZN',\n help=u'Code du site',\n required=True,\n ) \n lb_zn = fields.Char( \n string=u'LB_ZN',\n 
help=u'Nom du site',\n )\n\nclass TableC(ModelSQL, ModelView):\n u'Présence de sites Natura 2000 par commune (SIC/ZPS)'\n __name__ = 'portrait.tablec'\n\n cd_insee = fields.Many2One(\n 'portrait.commune',\n ondelete='CASCADE',\n string = u'CD_INSEE',\n help=u'Code INSEE de la commune',\n required=True,\n ) \n sitecode = fields.Many2One(\n 'protection.area',\n ondelete='CASCADE',\n string = u'SITECODE',\n help=u'Code du site',\n required=True,\n ) \n site_name = fields.Char( \n string=u'SITE_NAME',\n help=u'Nom du site',\n )\n\nclass TableD(ModelSQL, ModelView):\n u'Présence de sites archéozoologiques et archéobotaniques par commune'\n __name__ = 'portrait.tabled'\n\n cd_insee = fields.Many2One(\n 'portrait.commune',\n ondelete='CASCADE',\n string = u'CD_INSEE',\n help=u'Code INSEE de la commune',\n required=True,\n ) \n code_site = fields.Char( \n string = u'CODE_SITE',\n help=u'Code du site',\n required=True,\n ) \n nom_site = fields.Char( \n string=u'NOM_SITE',\n help=u'Nom du site',\n )\n","repo_name":"silpol/tryton-bef","sub_path":"trytond/modules/portrait_espece/espace.py","file_name":"espace.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"18699784899","text":"# 구해야 하는 것: 리프노드(자식의 개수가 0인 노드)\n# 중간에 한 노드를 지우면 지운 노드의 하위 노드들은 다 삭제\n# 1. 꼬리인지 확인하고 2. 최상단 노드에 도달할 수 있는지 조건 두 개 충족해야함\n\n\nN = int(input())\narr = list(map(int, input().split()))\ndel_node_num = int(input())\n# 부모가 아예 없도록\narr[del_node_num] = -2\n\n\nans = 0\nfor i in range(len(arr)-1, -1, -1):\n flag = 0\n # 내 밑에 새끼가 더 없는지 확인\n for j in range(i, len(arr)):\n if i == arr[j]:\n flag = 1\n break\n if flag == 0:\n # 무조건 최상단에 올라가거나 중간에 삭제한 노드에서 빠질 수 밖에 없음\n while True:\n # 한단계 위 idx\n upper_idx = arr[i]\n # 처음부터 삭제된 노드\n if upper_idx == -2:\n break\n if upper_idx == -1:\n ans += 1\n break\n # 올라가다가 부모까지 가거나 삭제된 노드 만나는 경우\n if arr[upper_idx] == -1:\n ans += 1\n break\n elif arr[upper_idx] == -2:\n break\n i = upper_idx\nprint(ans)\n","repo_name":"DongChanKIM2/Algorithm","sub_path":"solved.ac/G5/트리.py","file_name":"트리.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"17854498133","text":"class Solution:\n def tribonacci(self, n: int) -> int:\n if n == 0: return 0\n if n in [1, 2]: return 1\n memo = [0, 1, 1]\n for i in range(3, n + 1):\n s = sum(memo)\n memo[0] = memo[1]\n memo[1] = memo[2]\n memo[2] = s\n return memo[2]","repo_name":"PiotrZb/LeetCode-Problems-Solutions","sub_path":"Easy/1137_NthTribonacciNumber/Python3/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"16835068916","text":"from nie.ss.minio import MinioClient\nfrom loguru import logger\n\n\nclient = MinioClient(\n end_point='192.168.31.245:9000',\n access_key='ponponon',\n secret_key='ponponon',\n bucket_name='nie',\n)\n\nif not client.status.conn.bucket_exists(bucket_name='nie'):\n client.status.conn.make_bucket(bucket_name='nie')\n\nif client.os.path.exists('test/001.txt'):\n with client.open('test/001.txt', 'r', encoding='utf-8') as file:\n logger.debug(file.read())\nelse:\n with client.open('test/001.txt', 'w', encoding='utf-8') as file:\n file.write('nie project is 
good')\n","repo_name":"ponponon/nie_examples","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"33525458887","text":"# Owner: \n# !/usr/bin/python3\n# 2020/3/20 11:10 PM\n# data_filter.py\n\nfrom appium_robot.robot.tools.function_tools import *\nfrom datetime import datetime, timedelta\nfrom copy import deepcopy\nfrom wcwidth import wcswidth as ww\n\n\nclass DataFilterBase(object):\n \"\"\"\n 功能:筛选过滤数据,支持根据字段值筛选、使用正则匹配筛选和过滤,支持筛选指定时间范围内的数据\n \"\"\"\n def __init__(self, data: list):\n self.__data = deepcopy(data)\n self.data = deepcopy(data)\n\n def field_filter(self, **kwargs):\n \"\"\"\n 根据指定字段筛选,选取指定字段名称存在并且值等于所传参数的数据\n 支持多个字段筛选。relation表示逻辑关系,默认为and\n 使用例子:self.field_filter(city='上海')\n :param kwargs: \n :return: \n \"\"\"\n relation = 'and' if 'relation' not in kwargs else kwargs['relation']\n data = []\n if relation == 'and':\n for record in self.data:\n flag = True\n for key, value in kwargs.items():\n if key not in record or record[key] != value:\n flag = False\n break\n if flag:\n data.append(record)\n elif relation == 'or':\n for record in data:\n for key, value in kwargs.items():\n if key in record and record[key] == value:\n data.append(record)\n break\n else:\n raise ValueError('relation值只能是and或or!')\n self.data = data\n\n def field_filter_in(self, **kwargs):\n \"\"\"\n 根据指定字段筛选,选取指定字段名称存在并且值in所传参数的数据\n 支持多个字段筛选。relation表示逻辑关系,默认为and\n 使用例子:self.field_filter_in(city=['上海', '北京'])\n :param kwargs: \n :return: \n \"\"\"\n relation = 'and' if 'relation' not in kwargs else kwargs['relation']\n data = []\n if relation == 'and':\n for record in self.data:\n flag = True\n for key, values in kwargs.items():\n if key not in record or record[key] not in values:\n flag = False\n break\n if flag:\n data.append(record)\n elif relation == 'or':\n for record in data:\n for key, values in kwargs.items():\n if key in record and record[key] in values:\n data.append(record)\n break\n else:\n raise ValueError('relation值只能是and或or!')\n self.data = data\n\n def field_interval_filter(self, field: str, min_value=None, max_value=None):\n min_value = min_value or 0.0\n max_value = max_value or 10000000000000\n data = []\n for record in self.data:\n if field in record:\n if min_value <= float(record[field]) <= max_value:\n data.append(record)\n self.data = data\n\n def field_bool_filter(self, field: str, key=bool):\n data = []\n for record in self.data:\n if field in record and key(record[field]):\n data.append(record)\n self.data = data\n\n def field_re_filter(self, field: str, pattern='.*'):\n \"\"\"\n 筛选指定字段存在并且值满足pattern模式的数据\n :param field: 字段名称\n :param pattern: 正则模式\n :return: \n \"\"\"\n data = []\n for record in self.data:\n if field in record and re.search(pattern, record[field]):\n data.append(record)\n self.data = data\n\n def field_re_exclude(self, field: str, pattern='\\.\\.\\.'):\n \"\"\"\n 过滤掉指定字段存在并且值满足pattern模式的数据\n :param field: 字段名称\n :param pattern: 正则模式\n :return: \n \"\"\"\n data = []\n for record in self.data:\n if field not in record or not re.search(pattern, record[field]):\n data.append(record)\n self.data = data\n\n @staticmethod\n def strp_datetime(date_time: str):\n \"\"\"\n 将时间转化成datetime对象,目的是可以比较大小,根据时间筛选\n :param date_time: \n :return: \n \"\"\"\n return datetime.strptime(date_time, '%Y-%m-%d %H:%M:%S')\n\n @staticmethod\n def strp_date(date: str):\n \"\"\"\n 将日期转化成datetime对象,目的是可以比较大小,根据日期筛选\n :param date: \n :return: \n \"\"\"\n 
return datetime.strptime(date, '%Y_%m_%d')\n\n def date_filter(self, time_field: str, start_date=None, end_date=None):\n \"\"\"\n 根据日期筛选数据\n :param time_field: 日期字段名称\n :param start_date: 开始日期\n :param end_date: 结束日期\n :return: \n \"\"\"\n data = []\n if start_date and end_date:\n start_date = self.strp_date(start_date)\n end_date = self.strp_date(end_date) + timedelta(days=1)\n for record in self.data:\n if time_field in record:\n record_datetime = self.strp_datetime(record[time_field])\n if record_datetime and start_date <= record_datetime <= end_date:\n data.append(record)\n elif start_date:\n start_date = self.strp_date(start_date)\n for record in self.data:\n if time_field in record:\n record_datetime = self.strp_datetime(record[time_field])\n if record_datetime and record_datetime >= start_date:\n data.append(record)\n elif end_date:\n end_date = self.strp_date(end_date) + timedelta(days=1)\n for record in self.data:\n if time_field in record:\n record_datetime = self.strp_datetime(record[time_field])\n if record_datetime and record_datetime <= end_date:\n data.append(record)\n else:\n data = self.data\n self.data = data\n\n def timedelta_filter(self, time_field: str, time_delta=None, **kwargs):\n \"\"\"\n 筛选距离现在一定时间范围内的数据,可以精确到秒级,在数据实时性要求高的时候使用\n :param time_field: 时间字段名称\n :param time_delta: timedelta对象\n :param kwargs: 使用该参数生成timedelta对象\n :return: \n \"\"\"\n time_delta = time_delta or timedelta(**kwargs)\n now = datetime.now()\n start_time = now - time_delta\n data = []\n for record in self.data:\n if time_field in record:\n record_datetime = self.strp_datetime(record[time_field])\n if record_datetime and record_datetime >= start_time:\n data.append(record)\n self.data = data\n\n def field_distinct(self, field: str):\n values = set()\n for record in self.data:\n if field in record:\n values.add(record[field])\n return values\n\n def field_min(self, field: str, key=lambda x: x, default=None):\n if self.size >= 1:\n values = []\n for record in self.data:\n if field in record:\n values.append(key(record[field]))\n if values:\n return min(values)\n elif default is not None:\n return default\n\n def field_max(self, field: str, key=lambda x: x, default=None):\n if self.size >= 1:\n values = []\n for record in self.data:\n if field in record:\n values.append(key(record[field]))\n if values:\n return max(values)\n elif default is not None:\n return default\n\n def _field_sum(self, field: str, key=float):\n if self.size >= 1:\n count = 0\n s = 0\n for record in self.data:\n if field not in record:\n continue\n try:\n value = key(record[field])\n s += value\n count += 1\n except Exception as e:\n pass\n # print(record[field])\n # print(e.__repr__())\n print('count:', count)\n return s, count\n\n def field_sum(self, field: str, key=float):\n s, _ = self._field_sum(field=field, key=key)\n return s\n\n def field_average(self, field: str, key=float, ndigits=2):\n s, count = self._field_sum(field=field, key=key)\n return round(s/count, ndigits=ndigits)\n\n def order_by_field(self, field: str, key=lambda x: x, default='', reverse=False):\n self.data.sort(key=lambda record: key(record.get(field, default)), reverse=reverse)\n\n def order_by(self, key, reverse=False):\n self.data.sort(key=key, reverse=reverse)\n\n def display(self, fields: list=None, hide_fields=None, default='', limit=None, width_limit=100, blank_line_between_records=False):\n \"\"\"\n 展示数据\n :return: \n \"\"\"\n if not self.data:\n print('[]')\n return\n if not fields:\n fields = []\n for record in self.data:\n for field in 
record:\n if field not in fields:\n fields.append(field)\n if hide_fields:\n for field in hide_fields:\n fields.remove(field)\n data = list()\n data.append(fields)\n if limit is None:\n start_index, end_index = 0, len(self.data)\n elif isinstance(limit, int):\n start_index, end_index = 0, limit\n else:\n start_index, end_index = limit\n field_width = dict()\n for record in self.data[start_index: end_index]:\n data.append([])\n for field in fields:\n value = record.get(field, default)\n data[-1].append(value)\n width = ww(str(value))\n if width > field_width.get(field, ww(str(field))):\n field_width[field] = min(width, width_limit)\n for record in data:\n for i in range(len(fields)):\n print(str(record[i]) + ' ' * (field_width.get(fields[i], ww(str(fields[i])))-ww(str(record[i]))), end=' '*3)\n print()\n if blank_line_between_records:\n print()\n\n def commit(self):\n \"\"\"\n 提交当前数据到备份\n :return: \n \"\"\"\n self.__data = deepcopy(self.data)\n\n def revert(self):\n \"\"\"\n 回滚数据\n :return: \n \"\"\"\n self.data = deepcopy(self.__data)\n\n def __len__(self):\n return len(self.data)\n\n @property\n def size(self):\n return len(self.data)\n\n\nif __name__ == '__main__':\n show_module(form='*')\n","repo_name":"chenabest/hello-world","sub_path":"utils/data_sql.py","file_name":"data_sql.py","file_ext":"py","file_size_in_byte":11200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"74992989410","text":"import tensorflow as tf\nfrom absl.testing import parameterized\n\nfrom model.architectures.losses import Momentum_Neighbors_NT_X, Momentum_NT_X, get_neighbors_mask_temporal, \\\n get_neighbors_dt_label_multiclass\n\n\nclass LossTest(parameterized.TestCase, tf.test.TestCase):\n @parameterized.named_parameters((\"base\",))\n def test_NCL_loss(self, ):\n samples = tf.eye(100) # all samples are orthogonal one another\n neigh_queue = tf.random.uniform((500, 2), 0, 1000.0)\n queue = tf.concat([samples, tf.zeros((400, 100))], axis=0)\n outputs_regular_CL = Momentum_NT_X([samples, queue], 0.05)\n outputs_CL_as_NCL = Momentum_Neighbors_NT_X([samples, queue], neigh_queue, 0.05, 1.0,\n get_neighbors_mask_temporal)\n loss_CL = outputs_regular_CL[0]\n loss_CL_as_NCL = outputs_CL_as_NCL[0]\n\n self.assertAlmostEqual(loss_CL.numpy(), 0.0)\n self.assertAlmostEqual(loss_CL_as_NCL.numpy(), 0.0)\n self.assertEqual(loss_CL_as_NCL.numpy(), loss_CL.numpy())\n\n @parameterized.named_parameters(('base', tf.stack([tf.range(100) + 1, -tf.range(100) - 1], axis=1),\n tf.stack([-tf.range(400) - 1, tf.range(400) + 1], axis=1), 0))\n def test_n_w(self, samples, queue, threshold):\n neigh_mat_diag = get_neighbors_mask_temporal(samples, tf.concat([samples, queue], axis=0), threshold)\n neigh_mat_double = get_neighbors_mask_temporal(samples, tf.concat([samples, samples], axis=0), threshold)\n self.assertAllEqual(neigh_mat_diag[:100, :100], tf.eye(100))\n self.assertAllEqual(tf.reduce_sum(neigh_mat_double, axis=1), 2 * tf.ones((100,)))\n\n @parameterized.named_parameters(\n ('base', tf.random.uniform((100, 1), 1, 1000.0), - tf.random.uniform((400, 1), 1, 1000.0)))\n def test_n_Y(self, samples, queue):\n neigh_mat_diag = get_neighbors_dt_label_multiclass(samples, tf.concat([samples, queue], axis=0))\n neigh_mat_double = get_neighbors_dt_label_multiclass(samples, tf.concat([samples, samples], axis=0))\n self.assertAllEqual(neigh_mat_diag[:100, :100], tf.eye(100))\n self.assertAllEqual(tf.reduce_sum(neigh_mat_double, axis=1), 2 * tf.ones((100,)))\n\n\nif __name__ == 
'__main__':\n tf.test.main()\n","repo_name":"ratschlab/ncl","sub_path":"model/architectures/tests/losses_test.py","file_name":"losses_test.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"89"} +{"seq_id":"70013037090","text":"# one year temp\r\n\r\nimport pandas as pd\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom datetime import datetime\r\nimport os\r\n\r\ndata = pd.read_csv('CONFIDENTIAL RANCHLAND SOLAR DATA - 5 min data.csv', header=[0, 1])\r\n\r\n\r\n# convert time style\r\ndef convert_date_time(date_string):\r\n date_object = datetime.strptime(date_string, \"%d-%m月-%y %H:%M:%S\")\r\n formatted_date = date_object.strftime(\"%m/%d/%Y %H:%M\")\r\n return formatted_date\r\n\r\n\r\ncolumn_name = data.columns[0]\r\ndata[column_name] = data[column_name].apply(convert_date_time)\r\n\r\na = data.columns.get_level_values(0).to_series()\r\nb = a.mask(a.str.startswith('Unnamed')).ffill().fillna('')\r\ndata.columns = [b, data.columns.get_level_values(1)]\r\ndata = data.fillna(method=\"ffill\")\r\n\r\ndata['Date', 'Timestamp'] = pd.to_datetime(data['Date', 'Timestamp'])\r\ndata.set_index(data['Date', 'Timestamp'], inplace=True)\r\ndata.drop(('Date', 'Timestamp'), inplace=True, axis=1)\r\ndata.replace(['Bad', '#DIV/0!'], pd.NA, inplace=True)\r\ndata.fillna(0, inplace=True)\r\n\r\nname = 'Inv 2'\r\nstarttime = '2022-01-01'\r\nendtime = '2022-12-31'\r\n\r\ntimepoint_start = '9:00'\r\ntimepoint_end = '15:00'\r\n\r\nfile_path = r'C:\\Users\\yandcuo\\Desktop\\Dr\\project\\process\\data\\SunEnergy1_DataSet\\July_Dic_e_t\\one month\\temp 9-15'\r\n\r\n\r\nstart_date = datetime.strptime(starttime, \"%Y-%m-%d\")\r\nend_date = datetime.strptime(endtime, \"%Y-%m-%d\")\r\n\r\n\r\ncurrent_month = start_date\r\nwhile current_month <= end_date:\r\n # 获取当前月份的起始日期和结束日期\r\n current_month_start = current_month.replace(day=1)\r\n next_month_start = (current_month + pd.DateOffset(months=1)).replace(day=1) - pd.DateOffset(days=1)\r\n\r\n # 获取该月的天数\r\n month_days = (next_month_start - current_month_start).days + 1\r\n\r\n\r\n num_rows = 4\r\n num_cols = 8\r\n\r\n\r\n fig_width = 18\r\n fig_height = 9\r\n colors = ['blue', 'red', 'green', 'purple']\r\n\r\n if month_days <= num_rows * num_cols:\r\n num_cols = month_days // num_rows + 1 if month_days % num_rows != 0 else month_days // num_rows\r\n\r\n # 创建子图,每个子图代表一个月\r\n fig, axes = plt.subplots(num_rows, num_cols, figsize=(fig_width, fig_height))\r\n axes = axes.ravel()\r\n\r\n for i in range(month_days):\r\n # 计算当前日期\r\n current_day = current_month_start + pd.DateOffset(days=i)\r\n current_day_str = current_day.strftime('%Y-%m-%d')\r\n\r\n # 获取当前日期的数据\r\n day_data = data.loc[current_day_str]\r\n time_range_data = day_data.between_time(timepoint_start, timepoint_end)\r\n\r\n for j, temp_type in enumerate(['PM1 Temp', 'PM2 Temp', 'PM3 Temp', 'PM4 Temp']):\r\n temp_data = time_range_data[name][temp_type].astype(float)\r\n sns.histplot(data=temp_data, ax=axes[i], color=colors[j], edgecolor='black', kde=True, label=temp_type)\r\n\r\n axes[i].set_title(f'Temp for {current_day_str}', fontsize=7)\r\n axes[i].set_xlabel('Temperature (°C)', fontsize=6)\r\n axes[i].set_ylabel('Frequency', fontsize=6)\r\n axes[i].legend(fontsize=6)\r\n # 删除多余的子图\r\n for j in range(month_days, num_rows * num_cols):\r\n fig.delaxes(axes[j])\r\n\r\n # 保存每个月的图像\r\n month_name = current_month.strftime('%B %Y')\r\n file_name = f'temperature_distribution_{month_name}.svg'\r\n 
plt.tight_layout()\r\n plt.savefig(os.path.join(file_path, file_name))\r\n plt.show()\r\n plt.close()\r\n # 切换到下一个月份\r\n current_month = next_month_start + pd.DateOffset(days=1)\r\n","repo_name":"yisamu/PV-Plot","sub_path":"Data-Analysis/effi-temp-set/temp_set_time_year.py","file_name":"temp_set_time_year.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"74045165409","text":"import os\nfrom glob import glob\n\nimport numpy as np\nimport pandas as pd\n\nfrom scenario_age_pred.const import DATASET_FILE, COL_ID\n\n\ndef _random(conf):\n all_clients = pd.read_csv(os.path.join(conf['data_path'], DATASET_FILE)).set_index(COL_ID)\n all_clients = all_clients.assign(random=np.random.rand(len(all_clients)))\n return all_clients[['random']]\n\n\ndef _client_agg(conf):\n transactions_train = pd.read_csv(os.path.join(conf['data_path'], 'transactions_train.csv'))\n transactions_test = pd.read_csv(os.path.join(conf['data_path'], 'transactions_test.csv'))\n df_transactions = pd.concat([transactions_train, transactions_test])\n\n agg_features = pd.concat([\n df_transactions.groupby(COL_ID)['amount_rur'].agg(['sum', 'mean', 'std', 'min', 'max']),\n df_transactions.groupby(COL_ID)['small_group'].nunique().rename('small_group_nunique'),\n ], axis=1)\n\n return agg_features\n\n\ndef _small_group_stat(conf):\n transactions_train = pd.read_csv(os.path.join(conf['data_path'], 'transactions_train.csv'))\n transactions_test = pd.read_csv(os.path.join(conf['data_path'], 'transactions_test.csv'))\n df_transactions = pd.concat([transactions_train, transactions_test])\n\n cat_counts_train = pd.concat([\n df_transactions.pivot_table(\n index=COL_ID, columns='small_group', values='amount_rur', aggfunc='count').fillna(0.0),\n df_transactions.pivot_table(\n index=COL_ID, columns='small_group', values='amount_rur', aggfunc='mean').fillna(0.0),\n df_transactions.pivot_table(\n index=COL_ID, columns='small_group', values='amount_rur', aggfunc='std').fillna(0.0),\n ], axis=1, keys=['small_group_count', 'small_group_mean', 'small_group_std'])\n\n cat_counts_train.columns = ['_'.join(map(str, c)) for c in cat_counts_train.columns.values]\n return cat_counts_train\n\n\ndef _metric_learning_embeddings(conf, file_name):\n df = pd.read_pickle(os.path.join(conf['data_path'], file_name))\n df = df.assign(**{COL_ID: lambda x: x[COL_ID].astype(int)}).set_index(COL_ID)\n return df\n\n\ndef load_features(\n conf,\n use_random=False,\n use_client_agg=False,\n use_small_group_stat=False,\n metric_learning_embedding_name=None,\n target_scores_name=None,\n):\n features = []\n if use_random:\n features.append(_random(conf))\n\n if use_client_agg:\n features.append(_client_agg(conf))\n\n if use_small_group_stat:\n features.append(_small_group_stat(conf))\n\n if metric_learning_embedding_name is not None:\n features.append(_metric_learning_embeddings(conf, metric_learning_embedding_name))\n\n return features\n\n\ndef load_scores(conf, target_scores_name):\n valid_files = glob(os.path.join(conf['data_path'], target_scores_name, 'valid', '*'))\n valid_scores = [pd.read_pickle(f).assign(**{COL_ID: lambda x: x[COL_ID].astype(int)}).set_index(COL_ID)\n for f in valid_files]\n\n test_files = glob(os.path.join(conf['data_path'], target_scores_name, 'test', '*'))\n test_scores = [pd.read_pickle(f).assign(**{COL_ID: lambda x: x[COL_ID].astype(int)}).set_index(COL_ID)\n for f in test_files]\n\n return valid_scores, 
test_scores","repo_name":"dllllb/coles-paper","sub_path":"experiments/scenario_age_pred/scenario_age_pred/features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"89"} +{"seq_id":"70778795492","text":"import logging\nimport os\nimport traceback\n\nfrom fastapi import FastAPI, status\nfrom fastapi.responses import JSONResponse\n\nfrom webserver.models import GetTaskResponse, Result, StartTaskResponse, TaskRequest\nfrom webserver import tasks\n\napp = FastAPI()\n\nlog_level = os.environ.get('LOG_LEVEL', 'INFO')\nlogging.basicConfig(format='[%(levelname)8s] %(asctime)s %(filename)16s:L%(lineno)-3d %(funcName)16s() : %(message)s', level=log_level)\nlogger = logging.getLogger(__name__)\n\ntask_manager = tasks.Manager()\n\n@app.get('/')\nasync def root():\n return {'app': 'autotrim', 'version': '0.1.0'}\n\n@app.post('/task')\nasync def start_task(task_request: TaskRequest) -> StartTaskResponse:\n try:\n task_id = task_manager.start_task(task_request)\n return StartTaskResponse(result=Result(), task_id=task_id)\n except:\n logger.exception('Failed to start task')\n return StartTaskResponse(result=Result(success=False, reason=traceback.format_exc()))\n\n@app.get('/task')\nasync def get_task(task_id: str) -> GetTaskResponse:\n try:\n task = task_manager.get_task(task_id)\n if task is None:\n return JSONResponse(status_code=status.HTTP_404_NOT_FOUND,\n content=GetTaskResponse(result=Result(success=False, reason=f'Task with ID: {task_id} not found')).dict())\n return GetTaskResponse(result=Result(), task=task)\n except:\n logger.exception('Failed to get task')\n return GetTaskResponse(result=Result(success=False, reason=traceback.format_exc()))","repo_name":"shikharbhardwaj/csgo-clips-autotrim","sub_path":"nbs/webserver/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"8374394670","text":"from django.urls import path\n\nfrom . import views\n\napp_name='farmergame'\nurlpatterns = [\n path('', views.root, name='root'),\n path('index/', views.FarmListView.as_view(), name='index'),\n path('/',views.view_farm, name='view_farm'),\n path('/buy/',views.trade_animals, name='trade_animals'),\n path('/button', views.button_trade , name='button_trade')\n]\n","repo_name":"feefladder/django","sub_path":"farmergame/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"42095559554","text":"import numpy as np\n\n\ndef fibonacci_space(size=1000, max_zenith_distance_rad=np.pi):\n \"\"\"\n Fibonacci-sphere/space\n ----------------------\n Returns the coordinates of points on a sphere which form a reasonable\n tiling where the tiles are similar in solid angle and shape.\n Of course it is not a perfect tiling (can not be) but it gets close enough\n to be useful for practical means.\n\n Parameters\n ----------\n size : int\n Number of points on the sphere.\n max_zenith_distance_rad : float\n Maximum zenith-distance (zenith is pos. 
z-axis) to put points.\n Default is Pi, what is the full sphere.\n\n Inspired by 'Fnord'.\n \"\"\"\n points = []\n phi = np.pi * (np.sqrt(5.0) - 1.0) # golden angle in radians\n\n z_start = 1\n z_stop = np.cos(max_zenith_distance_rad)\n\n for i, z in enumerate(np.linspace(z_start, z_stop, size)):\n radius = np.sqrt(1 - z * z) # radius at z\n\n theta = phi * i # golden angle increment\n\n x = np.cos(theta) * radius\n y = np.sin(theta) * radius\n\n points.append([x, y, z])\n\n return np.array(points)\n","repo_name":"cherenkov-plenoscope/binning_utils","sub_path":"binning_utils/sphere.py","file_name":"sphere.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"4924752115","text":"import streamlit as st\nimport time\nimport cv2\nimport mediapipe as mp\n\n\ndef lateral_raise():\n\n \n\n mp_drawing = mp.solutions.drawing_utils\n mp_pose = mp.solutions.pose\n pose = mp_pose.Pose()\n \n \n # cap = cv2.VideoCapture(\"scripts\\ExerciseDetection\\Videos\\lateral raise.mp4\")\n cap = cv2.VideoCapture(0)\n \n \n \n image_placeholder = st.empty()\n \n \n \n \n \n \n \n \n up = False\n counter = 0\n \n while True:\n success , img = cap.read()\n \n img = cv2.resize(img , (1280,720))\n imgRGB = cv2.cvtColor(img , cv2.COLOR_BGR2RGB)\n results = pose.process(imgRGB)\n if results.pose_landmarks:\n # cv2.putText(img , ltr , (0,50),cv2.FONT_HERSHEY_PLAIN,3,red,12)\n \n \n mp_drawing.draw_landmarks(img , results.pose_landmarks , mp_pose.POSE_CONNECTIONS)\n points = {}\n for id,lm in enumerate(results.pose_landmarks.landmark):\n h,w,c = img.shape\n cx , cy = int(lm.x*w) , int(lm.y*h)\n points[id] = (cx,cy)\n \n cv2.circle(img , points[12] , 15 , (255,0,0),cv2.FILLED)\n cv2.circle(img , points[14] , 15 , (255,0,0),cv2.FILLED)\n cv2.circle(img , points[11] , 15 , (255,0,0),cv2.FILLED)\n cv2.circle(img , points[13] , 15 , (255,0,0),cv2.FILLED)\n \n \n if not up and points[14][1] < points[12][1]:\n up = True\n counter+=1\n elif points[14][1] > points[12][1]:\n up = False\n \n \n \n \n cv2.putText(img , str(counter) , (100,150),cv2.FONT_HERSHEY_PLAIN , 12 , (255,0,0),12)\n \n \n \n \n \n image_placeholder.image(img, channels=\"BGR\", use_column_width=True)\n cv2.waitKey(1)\n ","repo_name":"MihirRajeshPanchal/DataHack-Thunderflow","sub_path":"scripts/LateralRaise/lateral_raise.py","file_name":"lateral_raise.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"13090056208","text":"# -*- coding: utf-8 -*-\n\n'''\nResolução:\n (1) Inicia um loop while infinito. A cada repetição:\n (2) Tenta ler um inteiro n, convertendo em inteiro.\n (3) Caso capturar EOFError, para o loop com break.\n (4) Le um polinômio, em uma mesma linha, separados por \" + \", pelo input().split(\" + \") e\n atribui para expression.\n (5) Cria uma lista vazia para armazenar a expressão derivada e atribui para derivated_expression.\n (6) Inicia um loop for de intervalo [0, len(expression)). 
A cada repetição:\n (7) Inicia uma variável new_term com uma string vazia para armazenar o termo derivado.\n (8) Inicia uma lista term para ler cada elemento do termo separados por x, pelo expression[i].split(\"x\").\n (9) Inicia uma variável coeficient para armazenar o coeficiente do termo (term[0]), convertido\n em inteiro, pelo int().\n (10) Inicia uma variável exponent para armazenar o expoente do termo (term[1]), convertido\n em inteiro, pelo int().\n (11) Multiplica o coeficiente pelo expoente e atualiza a variável coeficient.\n (12) Decresce 1 na variável exponent.\n (13) Se exponent for maior que 1, concatena em new_term str(coeficient) + \"x\" + str(exponent).\n (14) Senão, concatena str(coeficient) + \"x\".\n (15) Concatena new_term em derivated_expression.\n (16) Imprime derivated_expression em forma de string, com os termos separados por \" + \", pelo join().\n'''\n\nwhile True: # Inicia um loop infinito\n \n try: # Tenta\n n = int(input()) # Le um inteiro indicando a quantidade de termos\n except EOFError: # Se não ouver mais entradas\n break # Para o loop\n \n expression = input().split(\" + \") # Le um polinômio, em uma mesma linha, separados por \" + \"\n derivated_expression = [] # Cria uma lista para armazenar a expressão derivada\n \n for i in range(len(expression)): # Loop que percorre a expressão\n new_term = \"\" # Variável para armazenar o termo derivado\n term = expression[i].split(\"x\") # Coleta a expressão separada por x\n coeficient = int(term[0]) # Coleta o coeficiente\n exponent = int(term[1]) # Coleta o expoente\n \n coeficient *= exponent # Multiplica o o coeficiente pelo expoente\n exponent -= 1 # Decresce o expoente em 1\n \n if exponent > 1: # Se for um expoente maior que 1\n new_term += str(coeficient) + \"x\" + str(exponent) # Concatena o coeficiente + x + expoente\n else: # Senão\n new_term += str(coeficient) + \"x\" # Concatena o coeficiente + x\n derivated_expression += [new_term] # Adiciona o monômio na expressão\n \n print(\" + \".join(derivated_expression)) # Junta a lista de monômios e imprime com um \" + \" entre eles.","repo_name":"FernandoAyach/problem-solving","sub_path":"beecrowd/python/2154.py","file_name":"2154.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"42614481996","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Contents \n#
   1. Importing libraries\n#   2. Importing and organizing the data\n#      A. Convert the columns types for the features to float\n#      B. Convert the class label types to int\n#   3. Data Analysis and Preprocessing\n#      A. Missing Data Analysis\n#         a. Generate Sparsity Matrix for the missing data\n#         b. Generate Heat Map for the missing data\n#      B. Data Imputation\n#         a. Mean Imputation\n#         b. k-NN Imputation\n#      C. Dealing with imbalanced data\n#         a. Oversampling with SMOTE\n#   4. Data Modeling\n#      A. K-Fold Cross validation\n#      B. Models\n#         a. Adaboost classifier\n#         b. Neural Network classifier\n#         c. K-NearestNeighbor\n#         d. Random Forest classifier\n#         e. Extreme Gradient Boosting classifier\n#         f. Bagging classifier\n#   5. Model Analysis\n#      A. Model ranking\n#      B. Effect of varying number of estimators on the accuracy scores on different datasets\n#      C. Plotting effect of number of estimators on Accuracy\n#   6. References
\n\n# ## 1. Importing libraries\n\n# In[49]:\n\n\n# To suppress warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n# Basic libraries for data organization, statistical operations and plotting\nimport math\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# For loading .arff files\nfrom scipy.io import arff\n# To analyze the type of missing data\nimport missingno as msno\n# Library to perform Expectation-Maximization (EM) imputation\nimport impyute as impy\n# To perform mean imputation\nfrom sklearn.impute import SimpleImputer\n# To perform k-Fold Cross Validation\nfrom sklearn.model_selection import KFold\n# Formatted counter of class labels\nfrom collections import Counter\n# Ordered Dictionary\nfrom collections import OrderedDict\n# Library imbalanced-learn to deal with the data imbalance. To use SMOTE oversampling\nfrom imblearn.over_sampling import SMOTE\n\n# Importing classification models\nfrom xgboost import XGBClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nimport random\n\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import precision_recall_curve\n\n\n# In[50]:\n\n\n# Library for performing k-NN imputations\nimport fancyimpute\n\n\n# ## 2. Importing and organizing the data\n\n# Dataset Link: [Polish bankruptcy dataset](https://archive.ics.uci.edu/ml/datasets/Polish+companies+bankruptcy+data)\n\n# The column labels for the features are like `X1`, `X2`, ... , `X64`. The class label is `Y`.\n# Print the first 5 rows of the dataframe to see what the data looks like.\n\n# In[51]:\n\n\n############################################################\n# Loads the raw .arff file into a (records, metadata) tuple\ndef load_arff_raw_data():\n    return arff.loadarff('D:\\\\Iris\\\\新加坡项目\\\\Fin\\\\phrase2\\\\project\\\\ironman-master\\\\5year.arff')\n\n############################################################\n# Loads the raw .arff records into a pandas dataframe\ndef load_dataframes():\n    return pd.DataFrame(load_arff_raw_data()[0])\n\n############################################################\n# Set the column headers to X1 ... X64 and the class label to Y\ndef set_new_headers(dataframes):\n    cols = ['X' + str(i+1) for i in range(len(dataframes.columns)-1)]\n    cols.append('Y')\n    dataframes.columns = cols\n\n############################################################\n# dataframes is the pandas dataframe holding the dataset\ndataframes = load_dataframes()\n\n# Set the new headers for the dataframe (the renamed set of features X1 to X64)\nset_new_headers(dataframes)\n\n# print the first 5 rows of the dataset\ndataframes.head()\n\n\n# #### 2.A Convert the columns types for the features to float\n# The numeric data shown in the dataframe above is in fact a Python object. Let us convert all the numeric features of the dataframe to float to maintain consistency.
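\n\n# A quick way to see this (an illustrative sketch, not part of the original analysis; `dataframes` is the frame loaded above) is to inspect the column dtypes before the conversion:\n\n# In[ ]:\n\n\n# Before the conversion, the feature columns are reported with the generic 'object' dtype\nprint(dataframes.dtypes.value_counts())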
\n\n# In[52]:\n\n\n# Convert the dtypes of all the columns (other than the class label column) to float.\ndef convert_columns_type_float(dfs):\n    index = 1\n    while(index <= 63):\n        colname = dfs.columns[index]\n        col = getattr(dfs, colname)\n        dfs[colname] = col.astype(float)\n        index += 1\n\nconvert_columns_type_float(dataframes)\ndataframes.head()\n\n\n# #### 2.B Convert the class label types to int\n# If we look at the class label `Y`, we notice that the values are shown either as `b'0'` or `b'1'`.\n# They actually correspond to bankruptcy being false and true respectively.\n# It is convenient to convert them to the binary integers 0 and 1 respectively.\n\n# In[53]:\n\n\n# The class labels for the dataframe are originally of object type.\n# Convert them to int type\ndef convert_class_label_type_int(dfs):\n    col = getattr(dfs, 'Y')\n    dfs['Y'] = col.astype(int)\n\nconvert_class_label_type_int(dataframes)\ndataframes.head()\n\n\n# ## 3. Data Analysis and Preprocessing\n\n# ### 3.A Missing Data Analysis\n# Clearly, there is missing data. Let us now see how much of it is missing.\n\n# In[54]:\n\n\n############################################################\n# Get a clean dataframe by dropping all the rows which have missing values\ndef drop_nan_rows(dataframes, verbose=False):\n    clean_dataframes = dataframes.dropna(axis=0, how='any')\n    if verbose:\n        print('5year:', 'Original Length=', len(dataframes), '\\tCleaned Length=', len(clean_dataframes), '\\tMissing Data=', len(dataframes)-len(clean_dataframes))\n    return clean_dataframes\n\n# A quick analysis of how many rows contain missing values in the dataframe\nnan_dropped_dataframes = drop_nan_rows(dataframes, verbose=True)\n\n\n# The above step shows us that there are a lot of rows in the dataframe which have missing data in at least one of the features. The missing-data rows correspond to more than 50% of the entire data.\n\n# #### 3.A.a Generate Sparsity Matrix for the missing data\n# Now that we have established that there is a lot of missing data, let us find out if the missing data has some correlation.\n# The `matrix` function from the `missingno` library helps us generate a sparsity matrix, which shows us the gaps in the data.\n\n# In[55]:\n\n\n# generate the sparsity matrix (figure) for the dataframe\ndef generate_sparsity_matrix(dfs):\n    missing_df = dfs.columns[dfs.isnull().any()].tolist()\n    msno.matrix(dfs[missing_df], figsize=(20,5))\n\ngenerate_sparsity_matrix(dataframes)\n\n\n# From the sparsity plot above, we notice a lot of missing data, and the feature `X37` has the highest sparsity among all the features.\n\n# #### 3.A.b Generate Heat Map for the missing data\n# Now, let us find out if there is some correlation among the missing features.\n#\n# Using the `heatmap` function from the `missingno` library, let us plot the heatmap for the dataframe.\n\n# In[56]:\n\n\n# generate the heatmap for the dataframe\ndef generate_heatmap(dfs):\n    missing_df = dfs.columns[dfs.isnull().any()].tolist()\n    msno.heatmap(dfs[missing_df], figsize=(20,20))\n\ngenerate_heatmap(dataframes)\n\n\n# The heat map above describes the degree of nullity relationship between different features. The range of this nullity correlation is from -1 to 1 (-1 ≤ R ≤ 1).\n# Features with no missing value are excluded in the heatmap. If the nullity correlation is very close to zero (-0.05 < R < 0.05), no value will be displayed.\n#\n# A perfect positive nullity correlation (R=1) indicates that the first feature and the second feature both have corresponding missing values.\n#\n# A perfect negative nullity correlation (R=-1) means that one of the features is missing while the second one is not.\n#\n# The takeaway is that some features are heavily correlated in their nullity (R = 1 or -1), while others are essentially uncorrelated (R values close to 0).
\n\n# ### 3.B Data Imputation\n#\n# It is now established that we need to impute (fill in the gaps of) the missing data, as dropping the missing rows or eliminating the missing features is not an option.\n#\n# We would like to explore some of the widely used missing data imputation techniques:\n#\n# 1. Mean Imputation (baseline method)\n# 2. k Nearest Neighbors (k-NN) Imputation\n\n# #### 3.B.a Mean Imputation\n\n# In[57]:\n\n\ndef perform_mean_imputation(dfs):\n    # Construct an imputer with strategy as 'mean', to mean-impute along the columns\n    imputer = SimpleImputer(missing_values=np.nan, strategy='mean')\n    mean_imputed_dfs = pd.DataFrame(imputer.fit_transform(dfs))\n    mean_imputed_dfs.columns = dfs.columns\n    return mean_imputed_dfs\n\nmean_imputed_dataframes = perform_mean_imputation(dataframes)\n\n\n# #### 3.B.b k-Nearest Neighbors (k-NN) Imputation\n\n# In[58]:\n\n\ndef perform_knn_imputation(dfs):\n    knn_imputed_datasets = fancyimpute.KNN(k=100, verbose=True).fit_transform(dfs)\n    return pd.DataFrame(data=knn_imputed_datasets)\n\nknn_imputed_dataframes = perform_knn_imputation(dataframes)\nset_new_headers(knn_imputed_dataframes)\n\n\n# In the above 2 steps, we have successfully created 2 differently imputed dataframes, using the Mean and k-NN techniques respectively.\n#\n# Below, we create a dictionary of the imputed dataframes to re-use them later.\n\n# In[59]:\n\n\nimputed_dataframes_dictionary = OrderedDict()\nimputed_dataframes_dictionary['Mean'] = mean_imputed_dataframes\n#imputed_dataframes_dictionary['k-NN'] = knn_imputed_dataframes\n\n\n# ### ---------------------------------------------------------------------------------------------------------------------------------------------------------\n\n# ### 3.C Dealing with imbalanced data\n#\n# In the steps above, we have successfully dealt with the missing data. But we have not dealt with the class imbalance (if any) in the data. Simply put, data imbalance is a condition where the samples belonging to one or more 'majority' class labels of a labelled dataset heavily outnumber the samples belonging to the other 'minority' classes.\n#\n# Data imbalance critically affects the modeling, as the models won't have sufficient data belonging to the minority classes to train on. This leads to biased models and, ultimately, to poor performance on test data.\n#\n# Firstly, let us see if our data is imbalanced, and to what extent.\n\n# In[60]:\n\n\ndef check_data_imbalance(dfs):\n    print('Dataset: ')\n    print(dfs.groupby('Y').size())\n    minority_percent = (dfs['Y'].tolist().count(1) / len(dfs['Y'].tolist()))*100\n    print('Minority (label 1) percentage: ' + str(minority_percent) + '%')\n    print('-'*64)\n\ncheck_data_imbalance(dataframes)\n\n\n# We have seen in the step above that there is a lot of data imbalance in our dataset, as indicated by the percentage of minority class (label `1`) samples. With this huge magnitude of data imbalance, the models will not train well if we leave the data as is.
\n\n# #### 3.C.a Oversampling with SMOTE (Synthetic Minority Over-sampling Technique)\n\n# In[61]:\n\n\n# Split the features and labels into separate dataframes\ndef split_dataframes_features_labels(dfs):\n    feature_dfs = dfs.iloc[:,0:64]\n    label_dfs = dfs.iloc[:,64]\n    return feature_dfs, label_dfs\n\n# Performs the SMOTE oversampling for the given dataframe.\ndef oversample_data_SMOTE(dfs, verbose=False):\n    smote = SMOTE(sampling_strategy=1, random_state=42, k_neighbors=10)\n    # Split the features and labels for the dataframe\n    feature_dfs, label_dfs = split_dataframes_features_labels(dfs)\n    resampled_feature_arrays = []\n    resampled_label_arrays = []\n    if verbose: print('Dataset: ')\n    if verbose: print('Original dataset shape {}'.format(Counter(label_dfs)))\n    df_features_res, df_label_res = smote.fit_resample(feature_dfs, label_dfs)\n    if verbose: print('Resampled dataset shape {}\\n'.format(Counter(df_label_res)))\n    # Append the resampled feature and label arrays to their respective lists\n    resampled_feature_arrays.append(df_features_res)\n    resampled_label_arrays.append(df_label_res)\n    return resampled_feature_arrays, resampled_label_arrays\n\n# Utility function to convert the arrays of features and labels to pandas dataframes, and then join them.\n# Also re-assigns the column headers.\ndef restructure_arrays_to_dataframes(feature_arrays, label_arrays):\n    resampled_dfs = []\n    for i in range(len(feature_arrays)):\n        feature_df = pd.DataFrame(data=feature_arrays[i])\n        label_df = pd.DataFrame(data=label_arrays[i])\n        # Must set the column header for label_df, otherwise it won't join with feature_df, as columns overlap (with col name '0')\n        label_df.columns = ['Y']\n        resampled_dfs.append(feature_df.join(label_df))\n    # re-assign the column headers for features and labels\n    set_new_headers(resampled_dfs[0])\n    return resampled_dfs\n\n# Perform SMOTE oversampling on all the imputed dataframes, and return them in a dictionary.\ndef perform_oversampling_on_imputed_dataframes(df_dict):\n    imputed_oversampled_dataframes_dictionary = OrderedDict()\n    for key, dfs in df_dict.items():\n        print('SMOTE Oversampling for ' + key + ' imputed dataframes\\n')\n        smote_feature_arrays, smote_label_arrays = oversample_data_SMOTE(dfs, verbose=True)\n        oversampled_dataframes = restructure_arrays_to_dataframes(smote_feature_arrays, smote_label_arrays)\n        imputed_oversampled_dataframes_dictionary[key] = oversampled_dataframes\n        print('-'*100)\n    return imputed_oversampled_dataframes_dictionary\n\nimputed_oversampled_dataframes_dictionary = perform_oversampling_on_imputed_dataframes(imputed_dataframes_dictionary)\nimputed_oversampled_dataframes_dictionary
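\n\n# As a quick sanity check (an illustrative sketch, reusing the dictionary built above), the oversampled data should now contain an equal number of samples for both class labels:\n\n# In[ ]:\n\n\n# Verify the class balance after SMOTE: both labels should now have equal counts\nbalanced_df = imputed_oversampled_dataframes_dictionary['Mean'][0]\nprint(balanced_df.groupby('Y').size())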
\n\n# ## 4. Data Modeling: Building Classification Models\n\n# ### 4.A K-Fold Cross Validation\n\n# In[62]:\n\n\ndef prepare_kfold_cv_data(k, X, y, verbose=False):\n    X = X.values\n    y = y.values\n    kf = KFold(n_splits=k, shuffle=False, random_state=None)\n    X_train = []\n    y_train = []\n    X_test = []\n    y_test = []\n    for train_index, test_index in kf.split(X):\n        X_train.append(X[train_index])\n        y_train.append(y[train_index])\n        X_test.append(X[test_index])\n        y_test.append(y[test_index])\n    return X_train, y_train, X_test, y_test\n\n\n# ### 4.B Models\n\n# ### 4.B.a Adaboost classifier\n\n# In[63]:\n\n\n# Adaboost classifier\nada_classifier = AdaBoostClassifier(n_estimators=10, random_state=42)\n\n\n# ### 4.B.b Neural Network classifier\n\n# In[64]:\n\n\n# Neural Network (multi-layer perceptron) classifier\nnn_classifier = MLPClassifier(hidden_layer_sizes=(10,5,10), alpha=0.01, max_iter=300)\n\n\n# ### 4.B.c K-Nearest Neighbors classifier\n\n# In[65]:\n\n\n# k-NN classifier\nknn_classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)\n\n\n# ### 4.B.d Random Forest classifier\n\n# In[66]:\n\n\n# Random Forest classifier\nrf_classifier = RandomForestClassifier(n_estimators=5, criterion='entropy')\n\n\n# ### 4.B.e Extreme Gradient Boosting classifier\n\n# In[67]:\n\n\n# eXtreme Gradient Boosting classifier (XGBClassifier)\nxgb_classifier = XGBClassifier()\n\n\n# ### 4.B.f Bagging classifier\n\n# In[68]:\n\n\n# Bagging classifier\nbag_classifier = BaggingClassifier()\n\n\n# In[69]:\n\n\n# creating a dictionary of models\nmodels_dictionary = OrderedDict()\n\nmodels_dictionary['Adaboost'] = ada_classifier\nmodels_dictionary['NN'] = nn_classifier\nmodels_dictionary['knn'] = knn_classifier\nmodels_dictionary['Extreme Gradient Boosting'] = xgb_classifier\nmodels_dictionary['Random Forest'] = rf_classifier\nmodels_dictionary['Bagging'] = bag_classifier
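\n\n# Before running the full pipeline, a quick smoke test (an illustrative sketch, not part of the original analysis) confirms that one of the configured models trains on the balanced data:\n\n# In[ ]:\n\n\n# Fit a single model on the balanced data and report its training accuracy\nsmoke_df = imputed_oversampled_dataframes_dictionary['Mean'][0]\nX_smoke, y_smoke = smoke_df.iloc[:, 0:64], smoke_df.iloc[:, 64]\nrf_classifier.fit(X_smoke, y_smoke)\nprint('Training accuracy:', accuracy_score(y_smoke, rf_classifier.predict(X_smoke)))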
TP_list = np.zeros([k_folds])\n                \n                # Iterate over all the k-folds\n                for k_index in range(k_folds):\n                    X_train = X_train_list[k_index]\n                    y_train = y_train_list[k_index]\n                    X_test = X_test_list[k_index]\n                    y_test = y_test_list[k_index]\n                    \n                    # Fit the model and predict on the held-out fold\n                    clf = clf.fit(X_train, y_train)\n                    y_test_predicted = clf.predict(X_test)\n                    if(flag):\n                        # dump one sample row to disk once, for inspection\n                        # (note: ndarray.tofile takes a separator, not a file mode)\n                        X_test[0].tofile(\"1.txt\", sep=\",\")\n                        print(X_test[0])\n                        flag -= 1\n                    \n                    # accuracy for this fold\n                    _accuracy_ = accuracy_score(y_test, y_test_predicted, normalize=True)\n                    accuracy_list[k_index] = _accuracy_\n                    \n                    # recall for this fold (one value per class)\n                    _recalls_ = recall_score(y_test, y_test_predicted, average=None)\n                    recall_list[k_index] = _recalls_\n                    \n                    # precision for this fold (one value per class)\n                    _precisions_ = precision_score(y_test, y_test_predicted, average=None)\n                    precision_list[k_index] = _precisions_\n                    \n                    # confusion matrix for this fold\n                    _confusion_matrix_ = confusion_matrix(y_test, y_test_predicted)\n                    TN_list[k_index] = _confusion_matrix_[0][0]\n                    FP_list[k_index] = _confusion_matrix_[0][1]\n                    FN_list[k_index] = _confusion_matrix_[1][0]\n                    TP_list[k_index] = _confusion_matrix_[1][1]\n                \n                # create a metrics dictionary\n                metrics_results['Accuracy'] = np.mean(accuracy_list)\n                metrics_results['Precisions'] = np.mean(precision_list, axis=0)\n                metrics_results['Recalls'] = np.mean(recall_list, axis=0)\n                metrics_results['TN'] = np.mean(TN_list)\n                metrics_results['FP'] = np.mean(FP_list)\n                metrics_results['FN'] = np.mean(FN_list)\n                metrics_results['TP'] = np.mean(TP_list)\n                \n                if verbose:\n                    print('\\t\\t\\tAccuracy:', metrics_results['Accuracy'])\n                    print('\\t\\t\\tPrecision:', metrics_results['Precisions'])\n                    print('\\t\\t\\tRecall:', metrics_results['Recalls'])\n                \n                year_results[str(df_index+1)+'year'] = metrics_results\n            \n            imputer_results[imputer_name] = year_results\n        \n        model_results[model_name] = imputer_results\n        \n    return model_results\n\n\n# In[71]:\n\n\nresults = perform_data_modeling(models_dictionary, imputed_oversampled_dataframes_dictionary, verbose=True, k_folds=5)\n\n\n# In[72]:\n\n\n# Original Data\n#print(model_results)\n\n# Returns [accuracy, [precisions], [recalls]] for the given model name\n# (computed on the Mean-imputed, 1-year results), or -1 if the model is unknown.\ndef GetModelPerformance(s):\n    if model_results.get(s) is None:\n        return -1\n    \n    d = model_results[s]['Mean']['1year']\n    l = []\n    l.append(d['Accuracy'])\n    l.append(list(d['Precisions']))\n    l.append(list(d['Recalls']))\n    return l\n# sample\nprint(GetModelPerformance('Bagging'))\n\n\n# In[97]:\n\n\n\n# perform data modeling\n\ndef perform_data_modeling_predict(select, _models_, _imputers_, vector, verbose=False, k_folds=5):\n    \n    # 6 models\n    # 2 imputers\n    # 2 datasets\n    flag = 3\n    predict_results = []\n    final = 0\n    # Iterate over the models\n    for model_name, clf in _models_.items():\n        factor = GetModelPerformance(model_name)[0]\n        weight = 0.1\n        if (select == model_name):\n            weight = 0.5\n        if verbose: print(\"-\"*120, \"\\n\", \"Model: \" + '\\033[1m' + model_name + '\\033[0m' + \" Classifier\")\n        imputer_results = OrderedDict()\n        #print(model_name)\n        # Iterate over the different imputation mechanisms (Mean, k-NN, EM, MICE)\n        for imputer_name, dataframes_list in _imputers_.items():\n            if verbose: print('\\tImputer Technique: ' + '\\033[1m' + imputer_name + '\\033[0m')\n            \n            # call the split_dataframes_features_labels function to get a list of features and labels for all the dataframes\n            #feature_dfs, label_dfs = split_dataframes_features_labels(dataframes_list)\n            feature_dfs = [dataframes_list[0].iloc[:,0:64]]\n            label_dfs = [dataframes_list[0].iloc[:,64]]\n            \n            \n            # Iterate over dataframe_list individually\n            for df_index in 
range(len(dataframes_list)):\n if verbose: print('\\t\\tDataset: ' + '\\033[1m' '5year.aff'+ '\\033[0m')\n \n # Calling the 'prepare_kfold_cv_data' returns lists of features and labels \n # for train and test sets respectively.\n # The number of items in the list is equal to k_folds\n X_train_list, y_train_list, X_test_list, y_test_list = prepare_kfold_cv_data(k_folds, feature_dfs[df_index], label_dfs[df_index], verbose)\n \n \n # Iterate over all the k-folds\n for k_index in range(k_folds):\n X_train = X_train_list[k_index]\n y_train = y_train_list[k_index]\n X_test = X_test_list[k_index]\n # y_test = y_test_list[k_index]\n \n test=np.array(vector, ndmin = 2) \n #print(type(X_test),type(test))\n \n # Fit the model and \n clf = clf.fit(X_train, y_train)\n y_test_predicted = clf.predict(test)\n # if(flag==0):\n # print(test)\n # print(X_test[0])\n # flag-=1\n \n predict_results.append(y_test_predicted[0]*factor)\n #print(y_test_predicted[0])\n # print(weight,factor)\n # print(predict_results)\n final+=weight*(sum(predict_results)/len(predict_results)) \n predict_results.clear() \n return final \ndef PredictBankruptcy(vector,model):\n \n results = perform_data_modeling_predict(model,models_dictionary, imputed_oversampled_dataframes_dictionary,vector,verbose=False, k_folds=5)\n return results\n\n# sample\n# v=[ 2.02590836e-02, 6.59132358e-01, -1.48797771e-02, 9.54218220e-01,\n# -3.37041726e+01 , 2.02590836e-02 , 2.13899843e-02 , 4.02446166e-01,\n# 1.01546503e+00 , 2.44376711e-01 , 2.13899843e-02 , 7.10979814e-02,\n# 2.41617289e-02 , 2.13899843e-02 , 4.97973389e+03 , 7.42467110e-02,\n# 1.55056522e+00 , 2.13899843e-02 , 1.02967534e-02 , 2.94924288e+01,\n# 1.09079077e+00 , 4.07358364e-02 , 9.74701639e-03 , 2.13899843e-02,\n# 2.44376711e-01 , 7.24454259e-02 , 4.63636810e+02 ,-2.34828331e-02,\n# 4.93311763e+00, 3.33519189e-01 , 1.02967534e-02 , 6.88893269e+01,\n# 6.49498273e+00 , 6.37395903e-02 , 4.07358364e-02 , 2.07399853e+00,\n# 6.41132750e-01 ,5.49772445e-01 , 2.01608440e-02 , 3.31928760e-02,\n# 3.19428952e-01 , 2.01608440e-02 , 6.31800749e+01 , 3.36863590e+01,\n# 3.49861367e-01 , 6.21354956e-01 , 3.01741328e+01 , 1.40083219e-02,\n# 6.29586849e-03 , 5.00135377e-01 , 3.53725916e-01 , 1.88731542e-01,\n# 3.49527135e-01 , 8.23196604e-01 ,-1.27800718e+03 , 1.51641780e-02,\n# 9.57488002e-02 , 9.84837406e-01 , 1.65501067e+00 , 3.30527780e+01,\n# 1.09075326e+01 , 6.75938065e+01 , 6.57669517e+00 , 3.11362172e+00]\n#\n# md=\"Adaboost\"\n# print(\"Result:\",PredictBankruptcy(v,md))\n\n\n# ## Model Analysis\n\n# ### Model Ranking\n\n# In[ ]:\n\n\n# model -> imputer \ndef perform_model_ranking(models, imputers, results):\n column_headers = ['-'] + list(imputers.keys())\n rows = []\n for model_name, model_details in results.items():\n row = [model_name]\n for imputer_name, imputer_details in model_details.items():\n mean_accuracy = 0\n for year, metrics in imputer_details.items():\n mean_accuracy += metrics['Accuracy']\n mean_accuracy = mean_accuracy/len(imputer_details)\n row.append(mean_accuracy)\n rows.append(row)\n results_df = pd.DataFrame(data=rows, columns = column_headers)\n return results_df\n\n\n# In[ ]:\n\n\nperform_model_ranking(models_dictionary, imputed_oversampled_dataframes_dictionary, results)\n\n\n# ### Bagging: Effect of varying number of estimators on the accuracy scores\n\n# ### Plot of effect of number of estimators on Accuracy for Balanced Bagging classifier\n\n# ## References\n\n# https://docs.scipy.org/doc/numpy-1.14.0/reference/\n# https://pandas.pydata.org/\n# 
https://docs.scipy.org/doc/scipy/reference/generated/scipy.io.arff.loadarff.html\n# https://github.com/iskandr/fancyimpute\n# https://pypi.org/project/impyute/\n# http://scikit-learn.org/stable/modules/preprocessing.html\n# http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html\n# https://docs.python.org/3/library/collections.html\n# http://xgboost.readthedocs.io/en/latest/python/python_api.html\n# http://scikit-learn.org/stable/modules/svm.html\n# http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html\n# http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\n# http://contrib.scikit-learn.org/imbalanced-learn/stable/generated/imblearn.ensemble.BalancedBaggingClassifier.html\n# http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeClassifier.html\n# http://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.GaussianNB.html\n# https://docs.python.org/2/library/random.html\n# http://scikit-learn.org/stable/modules/classes.html\n\n# ## End of Project\n","repo_name":"StellaBYR/NUS-Bankruptcy-Prediction","sub_path":"BankruptcyPrediction.py","file_name":"BankruptcyPrediction.py","file_ext":"py","file_size_in_byte":28760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"1003684374","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom pymysql import connect\nfrom twisted.enterprise import adbapi #这个模块专门进行数据库处理的\nfrom pymysql import cursors\n\"\"\"\n 1 open_spider和close_spider 的使用\n 作为一个下载中间件,可以在__init__的方法中做一些初始化的操作,\n 也可以在open_spider中进行一些初始化操作,它是在爬虫开始时执行的方法,且执行一次\n 可以在close_spider中执行一些关闭的操作,它是在爬虫结时执行的方法,且执行一次\n \n 2 在进行数据数据库连接的时候\n 可以这样做:\n self.mysqlConn = connect(host=\"\",port=\"\")\n 也可以这样做\n self.mysqlConn = connect(**self.dbParams)\n \n 3 保存到数据库的异步操作,from twisted.enterprise import adbapi\n (1) pool的建立\n (2) defer.addErrback 添加错误处理方法\n \n\"\"\"\n#----------------------------------同步插入数据库---------------------\nclass Jianshu02Pipeline(object):\n\n # def __init__(self):\n # self.dbParams = {\n # \"host\":\"127.0.0.1\",\n # \"user\":\"root\",\n # \"password\":\"123456\",\n # \"database\":\"test\",\n # \"port\":3306,\n # \"charset\":\"utf8\"\n # }\n # self.mysqlConn = connect(**self.dbParams)\n # self.cursor = self.mysqlConn.cursor()\n # self._insertSql = None\n\n def open_spider(self,spider):\n print(\"执行pipeline----open_spider\")\n self.dbParams = {\n \"host\": \"127.0.0.1\",\n \"user\": \"root\",\n \"password\": \"123456\",\n \"database\": \"test\",\n \"port\": 3306,\n \"charset\": \"utf8\"\n }\n self.mysqlConn = connect(**self.dbParams)\n self.cursor = self.mysqlConn.cursor()\n self._insertSql = None\n\n def close_spider(self,spider):\n print(\"执行pipeline----close_spider\")\n self.cursor.close()\n self.mysqlConn.close()\n\n\n @property\n def insertSql(self):\n if not self._insertSql:\n self._insertSql = \"\"\"\n insert into jianshu(articleid,url,content,title,author,publicdate,text_num,read_num,comment_num,like_num,cated,stars) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n \"\"\"\n return self._insertSql\n return self._insertSql\n\n def process_item(self, item, spider):\n try:\n self.cursor.execute(self.insertSql,(item[\"articleid\"],item[\"url\"],item[\"content\"],item[\"title\"],\n item[\"author\"], item[\"publicdate\"], 
item[\"text_num\"], item[\"read_num\"],\n item[\"comment_num\"], item[\"like_num\"], item[\"cated\"], item[\"stars\"]))\n\n self.mysqlConn.commit()\n\n except Exception as e:\n print(\"e============\"+str(e.args))\n\n\n#-------------------------异步插入数据库---------------------------\n\n\nclass JianShuTwistedPipeline(object):\n\n #刚开始执行爬虫的时候执行\n def open_spider(self,spider):\n print(\"执行pipeline----open_spider\")\n self.dbParams = {\n \"host\": \"127.0.0.1\",\n \"user\": \"root\",\n \"password\": \"123456\",\n \"database\": \"test\",\n \"port\": 3306,\n \"charset\": \"utf8\",\n \"cursorclass\":cursors.DictCursor\n }\n\n #创建一个连接池\n self.dbpool = adbapi.ConnectionPool(\"pymysql\",**self.dbParams)\n self._insertSql = None\n\n #赋值insertSql语句\n @property\n def insertSql(self):\n if not self._insertSql:\n self._insertSql = \"\"\"\n insert into jianshu(articleid,url,content,title,author,publicdate,text_num,read_num,comment_num,like_num,cated,stars) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\n \"\"\"\n return self._insertSql\n return self._insertSql\n\n #insert_item 是一个可调用的函数\n def process_item(self, item, spider):\n defer = self.dbpool.runInteraction(self.insert_item,item)\n defer.addErrback(self.handleError,item,spider)\n\n #真正进行插入数据库时进行调用\n def insert_item(self,cursor,item):\n try:\n cursor.execute(self.insertSql,(item[\"articleid\"],item[\"url\"],item[\"content\"],item[\"title\"],\n item[\"author\"], item[\"publicdate\"], item[\"text_num\"], item[\"read_num\"],\n item[\"comment_num\"], item[\"like_num\"], item[\"cated\"], item[\"stars\"]))\n\n except Exception as e:\n print(\"e============\"+str(e.args))\n\n #如果发生了数据库的插入错误就会调用这个函数\n def handleError(self,error,item,spider):\n print(error)\n","repo_name":"kbbingbai/jianshu02","sub_path":"jianshu02/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"19746660356","text":"#!/usr/bin/env python3\n\nimport threading\nfrom socketIO_client import SocketIO, LoggingNamespace, logs\nfrom time import sleep\n\n\n\"\"\"\nAuthentication for the SocketIO backend\n\"\"\"\n\n\nclass CustomSocketIO(SocketIO):\n\n def _yield_warning_screen(self, seconds=None):\n yield from logs._yield_elapsed_time(seconds)\n\n\nclass Database(threading.Thread):\n\n def __init__(self, login_callback, fetch_callback,\n db_url=\"https://copyeverythingapp.com\", port=443):\n\n super(Database, self).__init__()\n self.login_callback = login_callback\n self.fetch_callback = fetch_callback\n self.credentials = {}\n self.online = False\n self._running = True\n self.db_url = db_url\n self.port = port\n self.sock = False\n self.start()\n\n def good(self):\n return self.auth and self.online\n\n def socket_connect(self):\n connected = False\n while(self._running and not connected):\n try:\n self.sock = CustomSocketIO(\n self.db_url, self.port, LoggingNamespace,\n verify=True)\n connected = True\n except:\n sleep(.5)\n\n def socket_setup(self):\n self.socket_connect()\n\n if(self._running):\n self.sock.on('connect', self.connected)\n self.sock.on('auth resp', self.authenticate_reply)\n self.sock.on('new server copy', self.fetch_callback)\n self.sock.on('disconnect', self.disconnected)\n\n while(self._running and not self.online):\n # print(\"Waiting for server acknowledgement...\")\n self.sock.wait(0.5)\n\n def connected(self):\n self.online = True\n\n def disconnected(self, data):\n # attempt to reconnect on disconnect\n self.auth = False\n if 
self.credentials:\n self.authenticate(self.credentials['username'],\n self.credentials['password'])\n\n def insert_new_paste(self, paste):\n if self.sock:\n self.sock.emit('new client copy', paste)\n else:\n self.login_callback(\"Unable to connect!\\n\"\n \"Check your internet connection.\")\n\n def authenticate(self, user, pswd):\n self.credentials = {\"username\": user,\n \"password\": pswd}\n\n def _authenticate(self):\n if not self.online or not self.sock:\n self.login_callback(\"Unable to connect!\\n\"\n \"Check your internet connection.\")\n print(\"Failed because too early\")\n return False\n\n self.sock.emit('auth', self.credentials)\n self.credentials = {}\n\n # reply will be in format: [bool_good, str_response]\n def authenticate_reply(self, data):\n if data[0]:\n self.auth = True\n outcome = \"good\"\n else:\n outcome = data[1]\n\n self.login_callback(outcome)\n\n def stop(self):\n self._running = False\n\n def run(self):\n self.socket_setup()\n\n while(self._running):\n if self.credentials:\n self._authenticate()\n self.sock.wait(0.5)\n\n\nif __name__ == \"__main__\":\n db = Database(lambda x: print(x), lambda x: print(x))\n sleep(1)\n db.authenticate(\"5westbury5@gmail.com\", \"testtest\")\n sleep(1)\n db.insert_new_paste(\"test\")\n db.stop()\n # db.get_latest_paste()\n # db.insert_new_paste(\"Not last jking\")\n","repo_name":"CopyEverything/CopyEverything-Desktop","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"20780478461","text":"from odoo import fields, models\nfrom odoo.tools import float_compare\n\nclass PurchaseOrderLine(models.Model):\n _inherit = 'purchase.order.line'\n\n def _update_received_qty(self):\n super(PurchaseOrderLine, self)._update_received_qty()\n for line in self.filtered(lambda x: x.move_ids and x.product_id.id not in x.move_ids.mapped('product_id').ids):\n bom = self.env['mrp.bom']._bom_find(product=line.product_id, company_id=line.company_id.id)\n if bom and bom.type_purchase == 'phantom':\n line.qty_received = line._get_bom_delivered(bom=bom)\n\nclass StockMove(models.Model):\n _inherit = 'stock.move'\n\n def action_explode(self):\n \"\"\" Explodes pickings \"\"\"\n # in order to explode a move, we must have a picking_type_id on that move because otherwise the move\n # won't be assigned to a picking and it would be weird to explode a move into several if they aren't\n # all grouped in the same picking.\n if not self.picking_type_id:\n return self\n bom = self.env['mrp.bom'].sudo()._bom_find(product=self.product_id, company_id=self.company_id.id)\n if self.picking_type_id.code == 'outgoing':\n if not bom or bom.type != 'phantom':\n return self\n if self.picking_type_id.code == 'incoming':\n if not bom or bom.type_purchase != 'phantom':\n return self\n if not bom:\n return self\n phantom_moves = self.env['stock.move']\n processed_moves = self.env['stock.move']\n factor = self.product_uom._compute_quantity(self.product_uom_qty, bom.product_uom_id) / bom.product_qty\n boms, lines = bom.sudo().explode(self.product_id, factor, picking_type=bom.picking_type_id)\n for bom_line, line_data in lines:\n phantom_moves += self._generate_move_phantom(bom_line, line_data['qty'])\n\n for new_move in phantom_moves:\n processed_moves |= new_move.action_explode()\n # if not self.split_from and self.procurement_id:\n # # Check if procurements have been made to wait for\n # moves = self.procurement_id.move_ids\n # if len(moves) == 
1:\n # self.procurement_id.write({'state': 'done'})\n if processed_moves and self.state == 'assigned':\n # Set the state of resulting moves according to 'assigned' as the original move is assigned\n processed_moves.write({'state': 'assigned'})\n # delete the move with original product which is not relevant anymore\n self.sudo().unlink()\n return processed_moves","repo_name":"BADEP/addons","sub_path":"purchase_mrp_bom_type/models/purchase_mrp.py","file_name":"purchase_mrp.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"89"} +{"seq_id":"11167362521","text":"from zrb.helper.typecheck import typechecked\nfrom zrb.helper.typing import List, Mapping\nfrom zrb.task_group.group import Group\nfrom zrb.task.any_task import AnyTask\nfrom zrb.task_env.env import Env\nfrom zrb.helper.string.jinja import is_probably_jinja\n\n\n@typechecked\ndef fetch_env_map_from_group(\n env_map: Mapping[str, str], group: Group\n) -> Mapping[str, str]:\n for task in group.get_tasks():\n env_map = fetch_env_map_from_task(env_map, task)\n for sub_group in group.get_children():\n sub_env_map: Mapping[str, str] = fetch_env_map_from_group(\n env_map, sub_group\n )\n env_map = cascade_env_map(env_map, sub_env_map)\n return env_map\n\n\n@typechecked\ndef fetch_env_map_from_task(\n env_map: Mapping[str, str], task: AnyTask\n):\n task_env_map: Mapping[str, str] = {}\n for env_file in task.get_env_files():\n envs = env_file.get_envs()\n task_env_map = add_envs_to_env_map(task_env_map, envs)\n task_env_map = add_envs_to_env_map(task_env_map, task._envs)\n env_map = cascade_env_map(env_map, task_env_map)\n for upstream in task.get_upstreams():\n task_env_map = fetch_env_map_from_task(env_map, upstream)\n for checker in task.get_checkers():\n task_env_map = fetch_env_map_from_task(env_map, checker)\n return env_map\n\n\n@typechecked\ndef add_envs_to_env_map(\n env_map: Mapping[str, str], envs: List[Env]\n) -> Mapping[str, str]:\n for env in envs:\n if env.os_name == '':\n continue\n env_name = get_env_name(env)\n env_default = get_env_default(env)\n env_map[env_name] = env_default\n return env_map\n\n\n@typechecked\ndef cascade_env_map(\n env_map: Mapping[str, str],\n other_env_map: Mapping[str, str]\n) -> Mapping[str, str]:\n for key, value in other_env_map.items():\n if key in env_map:\n continue\n env_map[key] = value\n return env_map\n\n\n@typechecked\ndef get_env_name(env: Env) -> str:\n if env.os_name is None:\n return env.name\n return env.os_name\n\n\n@typechecked\ndef get_env_default(env: Env) -> str:\n if is_probably_jinja(env.default):\n return ''\n return env.default\n","repo_name":"state-alchemists/zrb","sub_path":"src/zrb/helper/env_map/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"91"} +{"seq_id":"43930526955","text":"from operator import truediv\nimport os\nimport re\nimport queue\nimport json\nimport this\n\n\n\n# 从目录下的data文件夹中读取html文本数据\n# 依次读取(startid).txt、(startid + step).txt、(startid + step * 2).txt、...、endid.txt\n# 对相邻两个版本的html文本做句子匹配,默认以换行符划分,可修改divideStr调整划分策略\n# hashmax为hash函数的模数\nstartid = 3891\nendid = 3895\nstep = 1\nhashmax = 10000007\ndivideStr_lv1 = r\"\\n\\r\"\ndivideStr_lv2 = r\",.\"\ndivideStr_lv3 = r\" \"\ndivideStr_level = [divideStr_lv1, divideStr_lv2, divideStr_lv3]\n\nempty_char = r\"\\n\\r,. 
\" #these char seem meaningless\ndef judge_emptystring(thisstr):\n l = len(thisstr)\n if (l == 0): return 1\n for i in range(l):\n flag = 0\n l2 = len(empty_char)\n for j in range(l2):\n if (thisstr[i] == empty_char[j]):\n flag = 1\n if (flag == 0): return 0\n return 1\n\ndef recursive_match(str1, str2, level):\n if (level == len(divideStr_level)) : return {}\n dict = {\n \"divideLevel\" : level,\n \"matchList\" : []\n }\n arr1 = mysplit(str1, divideStr_level[level])\n\n # return items like this\n # dict = {\n # \"divideLevel\" : 1,\n # \"matchList\" : [\n # {\n # \"matched\" : true,\n # \"moreInfo\" : {\n # \"sameString\" : \"stringContent...\"\n # }\n # },\n # {\n # \"matched\" : false,\n # \"moreInfo\" : {\n # \"string1\" : \"stringContent1...\",\n # \"string2\" : \"stringContent2...\",\n # \"goDown\" : {\n # \"divideLevel\" : 2,\n # \"matchList\" : [\n # {},{}\n # ]\n # }\n # }\n # },\n # {\n # \n # }\n # ]\n # }\n return 1\n\ndef getdiff(oldFile, newFile):\n OFile = open(oldFile, encoding='utf-8').read()\n NFile = open(newFile, encoding='utf-8').read()\n Ostr = mysplit(divideStr, OFile)\n Nstr = mysplit(divideStr, NFile)\n Olines = len(Ostr)\n Nlines = len(Nstr)\n maxlines = max(Olines, Nlines) + 5\n\n def hashcode(str):\n l = len(str)\n s = 0\n state = 0\n i = 0\n while (i < l):\n if (state == 0):# 此段逻辑如下:将cite_note cite_ref等开头直到下一个 0) & (b > 0)):\n if ((Ohash[a-1] == Nhash[b-1])):\n if ((Omatch[a-1] <= 0) & (Nmatch[b-1] <= 0)):\n q.put([a-1,b-1])\n matchPair(a-1, b-1)\n if ((a < Olines - 1) & (b < Nlines - 1)):\n if ((Ohash[a+1] == Nhash[b+1])):\n if ((Omatch[a+1] <= 0) & (Nmatch[b+1] <= 0)):\n q.put([a+1,b+1])\n matchPair(a+1, b+1)\n\n \n for i in range(Olines):\n addMatch(matchData, i, Omatch[i])\n\n for i in range(Nlines):\n if (Nmatch[i] == -1):\n addMatch(matchData, -1, i)\n\n return matchData\n\n\nallData = []\nid = startid\nwhile (id + step <= endid):\n nextid = id + step\n thisdict = {\n \"OldVersionNumber\" : id,\n \"NewVersionNumber\" : nextid,\n \"MatchedPairs\" : []\n }\n thisdict[\"MatchedPairs\"] = getdiff(id, nextid)\n allData.append(thisdict)\n id = nextid\nallDataStr = json.dumps(allData, indent=4)\nwith open(\"difference.json\", \"w\") as f:\n f.write(allDataStr)\n","repo_name":"UWzhuojiu/VIS_WIKI","sub_path":"revisionMatrix/level_diff.py","file_name":"level_diff.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"32640898318","text":"# Jonathan Lynch\n# 4/24/20\n# https://youtu.be/FoYlFbQ7ZwY\n# \"I have not given or received any unauthorized assistance on this assignment.\"\n\ndef humanPyramid(r, c):\n 'For row and column entered, returns the total weight on person in a human pyramid'\n if r == 0: # base case when row is zero\n return 0\n elif c == 0: # base case when column is zero (left side of pyramid)\n return (128 + humanPyramid(r-1, c))/2 # recursive function for column zero\n elif c == r:\n return (128 + humanPyramid(r-1, c-1))/2 # recursive function for right side of pyramid\n elif r == 2 and c == 1:\n return 2*humanPyramid(2,2) # weight on person in the middle (second row)\n elif r == 3 and c == 1 or r == 3 and c == 2:\n return humanPyramid(3,0) + (humanPyramid(2,1) + 128)/2 # weight on people in the middle (third row)\n elif r == 4 and c == 1 or r == 4 and c == 3:\n return humanPyramid(4,0) + (humanPyramid(3,2) + 128)/2 # weight on people in bottom row, first and third columns \n elif r == 4 and c == 2:\n return 2*((humanPyramid(3,2) + 128)/2) # 
weight on person in bottom row, middle\n\n","repo_name":"Jon-Lynch/CSC_430","sub_path":"human_pyramid.py","file_name":"human_pyramid.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2463324303","text":"\"\"\"\nScript for training models\n\"\"\"\n\n# import numpy as np\nimport os\n# import PIL\nimport tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential\nimport pathlib\n\nimport matplotlib.pyplot as plt\n\ndata_dir = pathlib.Path('./data/small_set')\n\nbatch_size = 32\nimg_height = 180\nimg_width = 180\n\ntrain_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"training\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\nval_ds = tf.keras.preprocessing.image_dataset_from_directory(\n data_dir,\n validation_split=0.2,\n subset=\"validation\",\n seed=123,\n image_size=(img_height, img_width),\n batch_size=batch_size)\n\n\n\nnum_classes = 3\n\ndata_augmentation = keras.Sequential(\n [\n layers.experimental.preprocessing.RandomFlip(\"horizontal\",\n input_shape=(img_height,\n img_width,\n 3)),\n layers.experimental.preprocessing.RandomRotation(0.1),\n layers.experimental.preprocessing.RandomZoom(0.1),\n ]\n)\n\nmodel = Sequential([\n data_augmentation,\n layers.experimental.preprocessing.Rescaling(1. / 255, input_shape=(img_height, img_width, 3)),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Dropout(0.2),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(num_classes)\n])\n\nmodel.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\nout_path = \"./models/training_3/\"\ncheckpoint_path = out_path + \"cp.ckpt\"\ncheckpoint_dir = os.path.dirname(checkpoint_path)\n\n# Create a callback that saves the model's weights\ncp_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path,\n save_weights_only=True,\n verbose=1)\n\nepochs = 20\n\nhistory = model.fit(train_ds,\n validation_data=val_ds,\n epochs=epochs,\n callbacks=[cp_callback])\n\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\nepochs_range = range(epochs)\n\nplt.figure(figsize=(8, 8))\nplt.subplot(1, 2, 1)\nplt.plot(epochs_range, acc, label='Training Accuracy')\nplt.plot(epochs_range, val_acc, label='Validation Accuracy')\nplt.legend(loc='lower right')\nplt.title('Training and Validation Accuracy')\n\nplt.subplot(1, 2, 2)\nplt.plot(epochs_range, loss, label='Training Loss')\nplt.plot(epochs_range, val_loss, label='Validation Loss')\nplt.legend(loc='upper right')\nplt.title('Training and Validation Loss')\nplt.savefig(out_path + 'training_acc_loss.png', dpi=300)\n","repo_name":"brendanlafferty/cat-id","sub_path":"modeling.py","file_name":"modeling.py","file_ext":"py","file_size_in_byte":3067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10900393637","text":"from commonutils_spider import CommonsMysqlUtils\nfrom commonutils_spider import CommonsInitValue\nfrom selenium import 
webdriver\nimport uuid\n\ndef crawDailyComments(link):\n\n currentList = []\n browsor = webdriver.PhantomJS()\n browsor.get(link)\n contextList = browsor.find_elements_by_class_name('news-item')\n for mainContext in contextList:\n pubDate = CommonsInitValue.initNowTime()\n title = mainContext.find_element_by_tag_name('a').text\n linkUrl = mainContext.find_element_by_tag_name('a').get_attribute('href')\n descriptContext = mainContext.find_element_by_class_name('desc').text\n currentList.append([str(uuid.uuid1()),linkUrl,title,pubDate,descriptContext,'FOREX','ADSNET'])\n return currentList\n\n\ndef writeDailyComments():\n link = 'http://www.ads-securities.com/zhs/market-research'\n currentArray = crawDailyComments(link)\n\n dbManager = CommonsMysqlUtils._dbManager\n SQL = \"DELETE FROM COMMENTS_NEWS_RESOURCE_TABLE WHERE SOURCEFLAG = 'ADSNET' AND COMMENTFLAG='FOREX' \"\n dbManager.executeUpdateOrDelete(SQL)\n\n formatSQL = ' INSERT COMMENTS_NEWS_RESOURCE_TABLE ' \\\n ' (KEYID,LINKURL,TITLE,PUBDATE,DESCRIPTCONTEXT,COMMENTFLAG,SOURCEFLAG)' \\\n ' VALUES (%s,%s,%s,%s,%s,%s,%s)'\n dbManager.executeManyInsert(formatSQL,currentArray)","repo_name":"wusezhangserver/kttspider","sub_path":"comment_spider/AdsNetSpider.py","file_name":"AdsNetSpider.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"36735893383","text":"class Solution:\n # @param A : string\n # @param B : string\n # @return a strings\n def addBinary(self, A, B):\n return bin(int(A,2)+int(B,2))[2:]\n\n \nif __name__=='__main__':\n n = int(input())\n for i in range(n):\n S = Solution()\n c1 = input()\n c2 = input()\n print (S.addBinary(c1,c2))\n \n\n'''\nADDBINARY\n\nGiven two binary strings, return their sum (also a binary string).\n\nExample:\n\na = \"100\"\n\nb = \"11\"\n\nReturn a + b = “111”.\n'''\n","repo_name":"Cbkhare/Challenges","sub_path":"InterviewBit_Strings_AddBinary.py","file_name":"InterviewBit_Strings_AddBinary.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"30523720206","text":"\n####################################################################################################\n## 네이버 금융에서 코스피 200의 주요제무정보를 여러 엑셀에 sheet 를 만들어 저장 \n#####################################################################################################\n#1. 
한국 거래소 종목 가져오기\nstock_code=pd.read_html('http://kind.krx.co.kr/corpgeneral/corpList.do?method=download',header=0)[0]\n\n#한글 컬럼명을 영어로 변경\nstock_code = stock_code.rename(columns={'회사명': 'company', '종목코드': 'code'})\n\n#종목코드가 6자리이기 때문에 6자리르 맞춰주기 위해 설절해줌\nstock_code.code=stock_code.code.map('{:06d}'.format)\n\n#자동으로 excel 저장.\npath='C:\\\\ITWILL\\\\sihun'\nkospi=pd.read_excel(path+'\\\\code.xlsx')\ncompany1=list(kospi.iloc[1:,1])\n\ncnt=-1\nerror=[]\nexcel=[]\nfor company in company1:\n print(company)\n try :\n cnt+=1\n code=stock_code[stock_code.company==company].code.values[0]\n url1=f'https://finance.naver.com/item/main.nhn?code={code}'\n table1=pd.read_html(url1,encoding=\"cp949\")\n excel.append(table1[3]) \n \n \n except Exception as e:\n print('error company :', company,cnt)\n error.append(cnt)\n \nprint(error) # error 나오는 색인 from cmpany1 list.\n#[6, 8, 11, 25, 26, 32, 33, 39, 49, 54, 64, 71, 88, 97, 118, 123, 146, 156, 170]\n'''\n# 200개의 회사를 200개의 excel 파일에 저장\nfor a in range(0,200):\n excel[a].to_excel(excel_writer=f'C:\\\\ITWILL\\\\sihun\\\\kopsi excel\\\\lg_df{a}.xlsx') \n''' \n# 200 개의 excel 파일이 만들어 진다. 이것은 나머지 작업을 함에 있어 효율적이지 못하다.\n\n################################################################################\n## 하나의 액셀에 여러 sheet 만들어서 각각의 sheet에 주식 정보 저장 -> Data load continue\n##################################################################################\n'''\n# 오류 발생 왜냐하면 하나의 기업이 지워지면 색인이 당겨져서 다음 색인 삭제시 다른 기업 이사라짐.\ncompany1=list(kospi.iloc[1:,1])\nnumber=[6, 8, 11, 25, 26, 32, 33, 39, 49, 54, 64, 71, 88, 97, 118, 123, 146, 156, 170]\n \nfor num in number:\n company1.remove(company1[num])\n'''\n\ncompany1=list(kospi.iloc[1:,1])\nnumber=[6, 8, 11, 25, 26, 32, 33, 39, 49, 54, 64, 71, 88, 97, 118, 123, 146, 156, 170]\na=[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]\nadd_list = [number[i] - a[i] for i in range(len(a))]\n\nfor num in add_list:\n company1.remove(company1[num])\ncompany1=list(company1) #200-19\n\n\n# import xlsxwriter\n'''\n# 10개의 회사 정보를 하나의 액셀 파일일에 저장\nwith pd.ExcelWriter('C:\\\\ITWILL\\\\sihun\\\\sheet\\\\Kospi_df.xlsx') as writer:\n for a in list(range(0,10)):\n company2=company1[a]\n print(company2)\n excel[a].to_excel(writer,sheet_name=f'{company2}')\n'''\n\n# 자동으로 181 개의 데이터를 10개의 액첼 파일에 저장\nlist(range(20))\nc=0\nd=10\nfor b in range(10):\n with pd.ExcelWriter(f'C:\\\\ITWILL\\\\sihun\\\\sheet\\\\Kospi_df{b}.xlsx') as writer:\n c+=10\n d+=10\n for a in list(range(c,d)):\n company2=company1[a]\n print(company2)\n excel[a].to_excel(writer,sheet_name=f'{company2}')","repo_name":"yeon4032/STUDY","sub_path":"GIT_NOTE/07_sihun/네이버 금융에서 회사 제무정보 excel with several sheet/네이버 금융에서 코스피 200의 주요제무정보를 여러 엑셀에 sheet 를 만들어 저장.py","file_name":"네이버 금융에서 코스피 200의 주요제무정보를 여러 엑셀에 sheet 를 만들어 저장.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"29900205099","text":"import os\n\nfrom sqlmesh.core.config import (\n AirflowSchedulerConfig,\n AutoCategorizationMode,\n CategorizerConfig,\n Config,\n DuckDBConnectionConfig,\n EnvironmentSuffixTarget,\n GatewayConfig,\n ModelDefaultsConfig,\n SparkConnectionConfig,\n)\nfrom sqlmesh.core.notification_target import (\n BasicSMTPNotificationTarget,\n SlackApiNotificationTarget,\n SlackWebhookNotificationTarget,\n)\nfrom sqlmesh.core.user import User, UserRole\n\nCURRENT_FILE_PATH = os.path.abspath(__file__)\nDATA_DIR = os.path.join(os.path.dirname(__file__), \"data\")\n\n\n# An in memory DuckDB config.\nconfig = Config(\n 
default_connection=DuckDBConnectionConfig(),\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n\n\n# A configuration used for SQLMesh tests.\ntest_config = Config(\n gateways={\"in_memory\": GatewayConfig(connection=DuckDBConnectionConfig())},\n default_gateway=\"in_memory\",\n auto_categorize_changes=CategorizerConfig(sql=AutoCategorizationMode.SEMI),\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n\n# A stateful DuckDB config.\nlocal_config = Config(\n default_connection=DuckDBConnectionConfig(database=f\"{DATA_DIR}/local.duckdb\"),\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n\n# Due to a 3.7 mypy bug we ignore. Can remove once 3.7 support is dropped.\nairflow_config = Config( # type: ignore\n default_scheduler=AirflowSchedulerConfig(),\n gateways=GatewayConfig(\n connection=SparkConnectionConfig(\n config_dir=os.path.join(CURRENT_FILE_PATH, \"..\", \"airflow\", \"spark_conf\"),\n config={\n \"spark.hadoop.javax.jdo.option.ConnectionURL\": \"jdbc:postgresql://localhost:5432/metastore_db\"\n },\n )\n ),\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n\n\n# Due to a 3.7 mypy bug we ignore. Can remove once 3.7 support is dropped.\nairflow_config_docker = Config( # type: ignore\n default_scheduler=AirflowSchedulerConfig(airflow_url=\"http://airflow-webserver:8080/\"),\n gateways=GatewayConfig(connection=SparkConnectionConfig()),\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n\n# A DuckDB config with a physical schema map.\nmap_config = Config(\n default_connection=DuckDBConnectionConfig(),\n physical_schema_override={\"sushi\": \"company_internal\"},\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n\n\nrequired_approvers_config = Config(\n default_connection=DuckDBConnectionConfig(),\n users=[\n User(\n username=\"admin\",\n roles=[UserRole.REQUIRED_APPROVER],\n notification_targets=[\n SlackApiNotificationTarget(\n notify_on=[\"apply_start\", \"apply_failure\", \"apply_end\", \"audit_failure\"],\n token=os.getenv(\"ADMIN_SLACK_API_TOKEN\"),\n channel=\"UXXXXXXXXX\", # User's Slack member ID\n ),\n ],\n )\n ],\n notification_targets=[\n SlackWebhookNotificationTarget(\n notify_on=[\"apply_start\", \"apply_failure\", \"run_start\"],\n url=os.getenv(\"SLACK_WEBHOOK_URL\"),\n ),\n BasicSMTPNotificationTarget(\n notify_on=[\"run_failure\"],\n host=os.getenv(\"SMTP_HOST\"),\n user=os.getenv(\"SMTP_USER\"),\n password=os.getenv(\"SMTP_PASSWORD\"),\n sender=\"sushi@example.com\",\n recipients=[\n \"team@example.com\",\n ],\n ),\n ],\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n\n\nenvironment_suffix_config = Config(\n default_connection=DuckDBConnectionConfig(),\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n environment_suffix_target=EnvironmentSuffixTarget.TABLE,\n)\n\n\nCATALOGS = {\n \"in_memory\": \":memory:\",\n \"other_catalog\": f\":memory:\",\n}\n\nlocal_catalogs = Config(\n default_connection=DuckDBConnectionConfig(catalogs=CATALOGS),\n default_test_connection=DuckDBConnectionConfig(catalogs=CATALOGS),\n model_defaults=ModelDefaultsConfig(dialect=\"duckdb\"),\n)\n","repo_name":"TobikoData/sqlmesh","sub_path":"examples/sushi/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":832,"dataset":"github-code","pt":"91"} +{"seq_id":"6870969848","text":"# Cos 모델 설치\nfrom app.models import Cos\n# cache server\nfrom django.core.cache import cache\n# 성분추출함수에 쓰일 패키지 설치\nfrom PIL import Image, ImageFont, 
ImageDraw\nfrom pytesseract import *\nimport pandas as pd\n# 코사인 유사도에 쓰일 패키지\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport os\n\nclass recommend:\n\n def __init__(self, link):\n # 업로드 된 화장품 성분 이미지 주소\n self.link = link\n self.fflist = self.text()\n\n def jaccard_similarity(self, doc1, doc2):\n doc1 = set(doc1)\n doc2 = set(doc2)\n return len(doc1 & doc2) / len(doc1 | doc2)\n\n\n # 이미지에서 글자 추출 후 가공\n def test(self):\n arg = self.link\n os.environ['TESSDATA_PREFIX'] = '/usr/share/tesseract-ocr/4.00/tessdata/'\n image = Image.open('./.' + arg)\n x = int(1920 / image.size[0])\n y = int(1080 / image.size[1])\n if x != 0:\n if y != 0:\n if x > y:\n image = image.resize((image.size[0] * y, image.size[1] * y))\n else:\n image = image.resize((image.size[0] * x, image.size[1] * x))\n\n text = image_to_string(image, lang=\"kor\")\n fflist = text.replace('\\n', '')\n fflist = list(map(lambda x: x.strip(), fflist.split(',')))\n return fflist\n\n # 코사인 유사도��� 통한 추천\n def cosine(self):\n fflist = self.fflist\n data = pd.read_csv(\"././static/cos5.txt\", header=None)\n listtt = fflist\n listtt = [v for v in listtt if v]\n lst2 = []\n for i in range(len(listtt)):\n lst = []\n for j in range(len(data[0])):\n test = self.jaccard_similarity(data[0][j], listtt[i])\n # 기존에 있던 화장품 성분과 이미지에서 추출한 성분의 자카드 유사도를 분석\n # 이미지에서 추출한 성분은 잘못읽혔을 가능성이 있으므로 논리비교 보다는 자카도 유사도의 유사도를 통해서 단어의 유사성을 판단\n if test > 0.5:\n lst.append((test, data[0][j]))\n lst.sort(reverse=True)\n if len(lst) != 0:\n lst2.append(lst[0][1])\n lst2 = ', '.join(lst2)\n data2 = cache.get_or_set('cosin_cos', Cos.objects.all())\n ffflist = []\n # ffflist에는 각각의 코사인유사도와 상품의 인덱스 번호가 들어간다\n # ffflist = [(0.1231545, 0), (0.321516, 1) ...]와 같은 형식으로 데이터가 담긴다.\n for i in range(len(data2)):\n sent = (data2[i].ingredient, ''.join(lst2))\n tfidf_vectorizer = TfidfVectorizer()\n tfidf_matrix = tfidf_vectorizer.fit_transform(sent) # 문장 벡터화 진행\n idf = tfidf_vectorizer.idf_\n\n cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])\n ffflist.append((float(cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])[0][0])))\n\n for j, k in enumerate(ffflist):\n ffflist[j] = (k, j)\n ffflist = sorted(ffflist, reverse=True)\n result = []\n for i in range(10):\n idx = ffflist[i][1]\n result.append({'prdname': data2[idx].prdname,\n 'ingredient': data2[i].ingredient,\n 'image': data2[i].image,\n 'brand': data2[i].brand,\n 'price': data2[i].price,\n 'cosine': str(ffflist[i][0])})\n return result\n\n\n\n\n","repo_name":"chljidn/cos_project3","sub_path":"app/views/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"3364503737","text":"from OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\nrotation = 0.0\n\ndef init():\n glClearColor(0.0, 0.0, 0.0, 1.0)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glEnable(GL_COLOR_MATERIAL)\n\n # Light source against the sphere as follows:\n light_pos = [1.0, 1.0, 1.0, 0.0]\n light_color = [1.0, 1.0, 1.0, 1.0]\n glLightfv(GL_LIGHT0, GL_POSITION, light_pos)\n glLightfv(GL_LIGHT0, GL_DIFFUSE, light_color)\n glLightfv(GL_LIGHT0, GL_SPECULAR, light_color)\n\n\n # Set the metallic effects of the sphere\n sphere_color = [0.8, 0.8, 0.8, 1.0]\n sphere_shininess = 100.0\n glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, sphere_color)\n glMaterialfv(GL_FRONT, GL_SPECULAR, sphere_color)\n 
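# GL_SHININESS is the specular (Phong) exponent, clamped to [0, 128]; higher\n    # values concentrate the highlight into a tighter, more metallic spot. For a\n    # duller, brushed look one could use e.g.:\n    #   glMaterialf(GL_FRONT, GL_SHININESS, 10.0)\n    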
glMaterialf(GL_FRONT, GL_SHININESS, sphere_shininess)\n\ndef display():\n global rotation\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glLoadIdentity()\n gluLookAt(0.0, 0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)\n glRotatef(rotation, 1.0, 1.0, 1.0)\n glutSolidSphere(1.0, 50, 50)\n glFlush()\n glutSwapBuffers()\n\n # set the sphere to rotate\n rotation += 0.5\n\ndef reshape(width, height):\n glViewport(0, 0, width, height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(width) / height, 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\ndef main():\n glutInit()\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\n glutInitWindowSize(500, 500)\n glutCreateWindow(b\"OpenGL Sphere\")\n init()\n glutDisplayFunc(display)\n glutReshapeFunc(reshape)\n glutIdleFunc(display)\n glutMainLoop()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Louis-Bat17/Game_Dev_Work","sub_path":"Class15_07/sphere.py","file_name":"sphere.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34486109199","text":"import pandas as pd\n\nmusics = [\"Classical\", \"Jazz\", \"Rap\", \"Pop\", \"Rock\", \"EDM\", \"Country\", \"Alternative\", \"Indie\", \"Cultural\", \"Lo-Fi\", \"Fortnite\"]\ngradeLevel = [\"Senior\", \"Junior\", \"Sophmore\", \"Freshman\"]\nclasses = {\n \"Senior\" : {},\n \"Junior\" : {},\n \"Sophmore\" : {},\n \"Freshman\" : {}\n}\n\nmusicCount = {}\ndef main():\n for el_class in classes:\n for music in musics:\n musicCount[music] = 0\n classes[el_class][music] = {\"count\" : 0}\n\n cols = [0, 1, 2]\n excel_file = pd.read_excel(\"PythonAndExcelTesting.xlsx\", usecols = cols)\n\n #stored all column headers/names in an array and stored in variable\n column_names = excel_file.columns.ravel()\n\n column_arrays = {}\n for column in column_names:\n column_arrays[column] = excel_file[column].tolist()\n \n #stored each column data in a variable for easier use later\n grade = column_arrays[\"What grade are you in?\"]\n average_grade = column_arrays['What is your average grade in each class?']\n music = column_arrays['What type of music do you listen to while studying? 
(pick at most two)']\n\n #print(grade)\n #print(average_grade)\n #print(music)\n\n for x in range(0, len(grade)):\n # sets at a specific grade level (index which x is) of a specific music and the key will be the count number of the specific music \n classes[grade[x]][music[x]][classes[grade[x]][music[x]][\"count\"]] = {\n \"AvgGrade\" : average_grade[x]\n }\n # increments count of total amount the music had been chosen in the specific class and overall\n classes[grade[x]][music[x]][\"count\"] += 1\n musicCount[music[x]] += 1\n\n #displays music information in each class\n for level in gradeLevel:\n print(f\"This is the information for the {level}s\")\n # to get the raw data\n #print(classes[level])\n for music in musics:\n print(music + \":\")\n for i in range(0, classes[level][music][\"count\"]):\n print(f\"Student {i + 1}:\\nAverage Grade: {classes[level][music][i]['AvgGrade']}\")\n \n print(\"\\n\")\n\n #displays the total count for each music genre\n print(\"This shows the total amount of music in total\")\n for music in musics:\n print(f\"{music}: {musicCount[music]}\")\n\n\nif __name__==\"__main__\":\n main()","repo_name":"YoshiFrancis/Python-Works","sub_path":"excel.py","file_name":"excel.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"26251424707","text":"from tkinter import *\nfrom tkinter import messagebox\n\nclass Gui(Tk):\n def __init__(self):\n super().__init__()\n\n self.title(\"Hotel Check In\")\n self.configure(bg=\"white\")\n \n self.successimg = PhotoImage(file=\"U:\\Programming\\com404\\\\2-guis\\\\4-images\\\\success.png\")\n self.failedimg = PhotoImage(file=\"U:\\Programming\\com404\\\\2-guis\\\\4-images\\\\failed.png\")\n\n self.__add_frame()\n self.__add_heading_label()\n\n self.__add_first_entry_label()\n self.__add_first_entry()\n self.__add_checkbox()\n \n self.__add_second_entry_label()\n self.__add_second_entry()\n self.__add_second_checkbox()\n \n self.__add_third_entry_label()\n self.__add_third_entry()\n self.__add_third_checkbox()\n\n self.__add_check_button()\n \n \n \n\n def __add_frame(self):\n self.frame = Frame()\n self.frame.pack()\n self.frame.configure(highlightthickness=4,highlightbackground=\"black\",bg=\"white\")\n \n def __add_heading_label(self):\n self.heading_label = Label(self.frame)\n self.heading_label.grid(row=0,column=0,columnspan=3)\n self.heading_label.configure(text=\"Hotel Check In\", font=\"Arial 24\",bg=\"white\")\n \n def __add_first_entry_label(self):\n self.first_entry_label= Label(self.frame)\n self.first_entry_label.grid(row=1,column=0,pady=7,sticky=W)\n self.first_entry_label.configure(text=\"Name:\", font=\"Arial 16\",bg=\"white\")\n\n def __add_first_entry(self):\n self.first_entry = Entry(self.frame)\n self.first_entry.grid(row=1,column=1,pady=7)\n self.first_entry.configure(borderwidth=0,highlightthickness=4,highlightbackground=\"black\",width=12,font=\"Arial 14\")\n self.first_entry.bind(\"\", self.name_check)\n\n def __add_checkbox(self):\n self.checkbox = Label(self.frame)\n self.checkbox.grid(row=1,column=3,pady=7,padx=4)\n self.checkbox.configure(image=self.failedimg,bg=\"white\",relief=SOLID,highlightthickness=4,highlightbackground=\"black\",height=22,width=25)\n \n def __add_second_entry_label(self):\n self.second_entry_label = Label(self.frame)\n self.second_entry_label.grid(row=2,column=0,pady=7,sticky=W)\n self.second_entry_label.configure(text=\"Passport Number\", font=\"Arial 16\",bg=\"white\")\n\n def 
__add_second_entry(self):\n self.second_entry = Entry(self.frame)\n self.second_entry.grid(row=2,column=1,pady=7)\n self.second_entry.configure(borderwidth=0,highlightthickness=4,highlightbackground=\"black\",width=12,font=\"Arial 14\")\n self.second_entry.bind(\"\", self.passport_num_check)\n\n def __add_second_checkbox(self):\n self.checkbox1 = Label(self.frame)\n self.checkbox1.grid(row=2,column=3,pady=7,padx=4)\n self.checkbox1.configure(image=self.failedimg,bg=\"white\",relief=SOLID,highlightthickness=4,highlightbackground=\"black\",height=25,width=25)\n\n def __add_third_entry_label(self):\n self.third_entry_label = Label(self.frame)\n self.third_entry_label.grid(row=3,column=0,pady=7,sticky=W)\n self.third_entry_label.configure(text=\"No. of nights\", font=\"Arial 16\",bg=\"white\")\n\n def __add_third_entry(self):\n self.third_entry = Entry(self.frame)\n self.third_entry.grid(row=3,column=1,pady=7)\n self.third_entry.configure(borderwidth=0,highlightthickness=4,highlightbackground=\"black\",width=12,font=\"Arial 14\")\n self.third_entry.bind(\"\", self.num_of_nights_check)\n\n def __add_third_checkbox(self):\n self.checkbox2 = Label(self.frame)\n self.checkbox2.grid(row=3,column=3,pady=7,padx=4)\n self.checkbox2.configure(image=self.failedimg,bg=\"white\",relief=SOLID,highlightthickness=4,highlightbackground=\"black\",height=25,width=25)\n\n def __add_check_button(self):\n self.check_button = Button(self.frame)\n self.check_button.grid(row=4,column=0,columnspan=2,pady=7,sticky=E,padx=20)\n self.check_button.configure(borderwidth=4,relief=SOLID,highlightcolor=\"black\",background=\"white\",width=10,height=2,text=\"Check In\")\n\n def name_check(self,Event):\n if len(self.first_entry.get()) < 1:\n messagebox.showerror(\"Invalid Information\",\"Name field needs to be filled in.\")\n else:\n self.checkbox.configure(image=self.successimg)\n\n def passport_num_check(self, Event):\n if len(self.second_entry.get()) < 1:\n messagebox.showerror(\"Invalid Information\",\"Passport number field needs to be filled in.\")\n else:\n self.checkbox1.configure(image=self.successimg)\n def num_of_nights_check(self, Event):\n if len(self.third_entry.get()) < 1:\n messagebox.showerror(\"Invalid Information\",\"Number of Nights needs to be filled in.\")\n elif int(self.third_entry.get()) > 365:\n messagebox.showerror(\"Invalid Information\", \"Number of nights cannot be more than 365.\") \n else:\n self.checkbox2.configure(image=self.successimg)\n\n\n\nif (__name__ == \"__main__\"):\n gui = Gui()\n gui.mainloop()","repo_name":"IIvanov29/com404","sub_path":"2-guis/4-images/4-hiding-and-showing.py","file_name":"4-hiding-and-showing.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"74136100784","text":"import torchvision.models as models\nimport torch.nn as nn\nimport torchvision.models.resnet as resnet\nimport torch.utils.model_zoo as model_zoo\nimport torch\nclass BasicConv(nn.Module):\n\n def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):\n super(BasicConv, self).__init__()\n self.out_channels = out_planes\n self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)\n self.bn = nn.BatchNorm2d(out_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None\n self.relu = nn.ReLU(inplace=True) if relu else None\n\n def forward(self, 
x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\n\nclass BasicSepConv(nn.Module):\n\n def __init__(self, in_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):\n super(BasicSepConv, self).__init__()\n self.out_channels = in_planes\n self.conv = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups = in_planes, bias=bias)\n self.bn = nn.BatchNorm2d(in_planes,eps=1e-5, momentum=0.01, affine=True) if bn else None\n self.relu = nn.ReLU(inplace=True) if relu else None\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n if self.relu is not None:\n x = self.relu(x)\n return x\n\nclass BasicRFB(nn.Module):\n\n def __init__(self, in_planes, out_planes, stride=1, scale = 0.1):\n super(BasicRFB, self).__init__()\n self.scale = scale\n self.out_channels = out_planes\n inter_planes = in_planes // 8\n self.branch1 = nn.Sequential(\n BasicConv(in_planes, inter_planes, kernel_size=1, stride=1),\n BasicConv(inter_planes, (inter_planes//2)*3, kernel_size=(1,3), stride=1, padding=(0,1)),\n BasicConv((inter_planes//2)*3, (inter_planes//2)*3, kernel_size=(3,1), stride=stride, padding=(1,0)),\n BasicSepConv((inter_planes//2)*3, kernel_size=3, stride=1, padding=3, dilation=3, relu=False)\n )\n self.branch2 = nn.Sequential(\n BasicConv(in_planes, inter_planes, kernel_size=1, stride=1),\n BasicConv(inter_planes, (inter_planes//2)*3, kernel_size=3, stride=1, padding=1),\n BasicConv((inter_planes//2)*3, (inter_planes//2)*3, kernel_size=3, stride=stride, padding=1),\n BasicSepConv((inter_planes//2)*3, kernel_size=3, stride=1, padding=5, dilation=5, relu=False)\n )\n\n self.ConvLinear = BasicConv(3*inter_planes, out_planes, kernel_size=1, stride=1, relu=False)\n if in_planes == out_planes:\n self.identity = True\n else:\n self.identity = False\n self.shortcut = BasicConv(in_planes, out_planes, kernel_size=1, stride=stride, relu=False)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self,x):\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n\n out = torch.cat((x1,x2),1)\n out = self.ConvLinear(out)\n if self.identity:\n out = out*self.scale + x\n else:\n short = self.shortcut(x)\n out = out*self.scale + short\n out = self.relu(out)\n return out\n\n\nclass ResNet18(resnet.ResNet):\n\n def __init__(self, pretrained=True, num_classes=212, **kwargs):\n super(ResNet18, self).__init__(resnet.BasicBlock, [2, 2, 2, 2], num_classes=num_classes * 2, **kwargs)\n if pretrained:\n pretrained_dict = dict(model_zoo.load_url(resnet.model_urls['resnet18']))\n del pretrained_dict['fc.weight']\n del pretrained_dict['fc.bias']\n model_dict = self.state_dict()\n model_dict.update(pretrained_dict)\n self.load_state_dict(model_dict)\n self.pred1 = BasicRFB(64, 64)\n self.pred2 = BasicRFB(128, 128)\n self.pred3 = BasicRFB(256, 256)\n self.pred4 = BasicRFB(512, 512)\n self.pred1_c = nn.Conv2d(64, 212 * 2, 1)\n self.pred2_c = nn.Conv2d(128, 212 * 2, 1)\n self.pred3_c = nn.Conv2d(256, 212 * 2, 1)\n self.pred4_c = nn.Conv2d(512, 212 * 2, 1)\n self.sigmoid = nn.Sigmoid()\n self.soft_max = nn.Softmax(dim=-1)\n\n def forward(self, x):\n out = []\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n out.append(self.pred1_c(self.pred1(x)))\n\n x = self.layer2(x)\n out.append(self.pred2_c(self.pred2(x)))\n\n x = self.layer3(x)\n out.append(self.pred3_c(self.pred3(x)))\n\n x = 
self.layer4(x)\n out.append(self.pred4_c(self.pred4(x)))\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n out.append(self.fc(x))\n\n res_list = []\n prob_list = []\n for p in out:\n res, prob = p.split([212, 212], dim=1)\n res = res.view((res.shape[0], res.shape[1], -1))\n prob = prob.view((prob.shape[0], prob.shape[1], -1))\n res_list.append(res)\n prob_list.append(prob)\n res = torch.cat(res_list, dim=-1)\n prob = torch.cat(prob_list, dim=-1)\n res = self.sigmoid(res)\n prob = self.soft_max(prob)\n out = res * prob\n out = out.sum(dim=-1)\n return out\n","repo_name":"JZDSS/emci","sub_path":"models/resnet18_rfb.py","file_name":"resnet18_rfb.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"91"} +{"seq_id":"19374522934","text":"# The goal of this file is to load images from the negative people dataset and create all their encodings\n# with resNet50 or googleNet. The created encodings will be used in the main flow of the code to fit KNN with\n# not only positive samples but also negative once.\n# The second part of code is useful for test how knn works on the generated encodings.\n\n# USAGE: \n# python createPerson_encodings.py --encOut encodings --createEncoding\n# python createPerson_encodings.py --encOut encodings --createEncoding --model resNet50 --imgOut imagesOut/s5n100_ -s 5 -n 100\n# python createPerson_encodings.py --encOut encodings --createEncoding --model googleNet --imgOut imagesOut/s9n90_ -s 9 -n 90\n\n# python createPerson_encodings.py --encOut encodings --model resNet50 --imgOut imgsOut/s5n100_ -s 5 -n 100\n# python createPerson_encodings.py --encOut encodings --model googleNet --imgOut imagesOut/s9n90_ -s 9 -n 90\n\n# import the necessary packages\nfrom scipy import spatial\nimport argparse\nimport pickle\nimport heapq\nimport cv2\nimport pprint\n\n#import custom functions\nimport sys\nimport os\nsys.path.append(os.path.abspath('./utils'))\nfrom dataset import NegativePeopleDataset\nfrom model import ResNet50, GoogleNet\nfrom gridView import createGrid\nimport pprint\n\n############################ Flow controll functions ###################################################\ndef computeEncoding(model, imagePath):\n\t\"\"\" Given a single image path, compute its encoding. \"\"\"\n\t# load the input image and convert it from RGB (OpenCV ordering) to dlib ordering (RGB)\n\timage = cv2.imread(imagePath)\n\t# compute the embedding\n\tencoding = model.feed(image)\n\treturn(encoding)\n\ndef encodeDataset(model, dataset, encodingsFile) -> dict:\n\t\"\"\"\tScan a part of the dataset and fill the N-dimensional space with the input samples. 
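Returns a dict keyed by identity label, where each value is a list of [encoding, imagePath] pairs (one entry per image of that identity). 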
\"\"\"\n\tprint(\"[INFO] Quantifying samples...\")\n\n\t# grab the paths to the input samples in our dataset\n\tlabelsAndPaths = dataset.getLabelsAndPaths()\n\t# initialize the list of known encodings\n\tencodings = {}\n\n\t# loop over the image paths\n\tfor (i, (l, path)) in enumerate(labelsAndPaths):\n\t\tprint(\"[INFO] processing image {}/{}\".format(i + 1, len(labelsAndPaths)))\n\t\tif l not in encodings:\n\t\t\t# add to the dictionary a new key\n\t\t\tencodings[l] = [[computeEncoding(model, path), path]]\n\t\telse:\n\t\t\tencodings[l].append([computeEncoding(model, path), path])\n\t\t\n\t\t#print(len(encodings[l][0][0])) #1024 for googleNet and 2048 for resNet50\n\n\t# dump the people encodings to disk\n\tprint(\"\\n[INFO] Serializing encodings...\")\n\tf = open(encodingsFile, \"wb\")\n\tf.write(pickle.dumps(encodings))\n\tf.close()\n\n\t#return the dicotionary\n\treturn(encodings)\n\ndef pairPeople(model, dataset, encodings: dict, queryNum: int=None, imgOut: str=None) -> None:\n\t\"\"\" Given a new image try to match it with the encodings previously calculated. \"\"\"\n\tprint(\"[INFO] Try pairing people...\")\n\n\t(queryLabel, queryPath) = dataset.queryImgPath(queryNum)\t# get a random query\n\n\tif queryPath is not None:\n\t\tqueryEncode = computeEncoding(model, queryPath)\t\t\t\t# compute its encoding\n\t\tprint(\"compute the query for the class id:\", queryLabel)\n\n\t\tlabels = []\n\t\tpaths = []\n\t\tdistances = []\n\t\tfor (label, listEnc) in encodings.items():\n\t\t\t# each key can store multiple points (AKA each identity can has multiple samples pictures)\n\t\t\tfor (enc, path) in listEnc:\n\t\t\t\tlabels.append(label)\n\t\t\t\tpaths.append(path)\n\t\t\t\tdist = spatial.distance.euclidean(queryEncode, enc)\n\t\t\t\tdistances.append(dist)\n\t\t\t\t#print(\"distance from B{} and A{} is: {:.3f}\".format(imgB, k, dist))\n\n\t\ttopK = heapq.nsmallest(9, zip(distances, labels, paths))\n\t\tfor (i, (score, id, pth)) in enumerate(topK):\n\t\t\tprint(\"The {}° match has score {:.3f} and id {}\".format(i+1, score, id))\n\t\tprint()\n\n\t\t#create the Grid for a better visualization\n\t\tif imgOut is not None: #aka: I want to see the result\n\t\t\tquery = [(queryLabel, queryPath)]\n\t\t\t# remove useless distance value and add the query img\n\t\t\tbestMatch = query + [el[1:] for el in topK]\n\n\t\t\t#create a grid visualization and show it\n\t\t\tgridImg = createGrid(bestMatch, 3, 3)\n\t\t\tif gridImg is not None:\n\t\t\t\tname=\"\".join([imgOut, str(queryLabel), \".jpg\"])\n\t\t\t\tcv2.imwrite(name, gridImg)\n\n\t\t\t\tcv2.imshow(\"grid\", gridImg)\n\t\t\t\tcv2.waitKey(-1)\n\t\t\t\tcv2.destroyAllWindows()\n\t\n\n############################ Main functions ############################################################\n\ndef parseArguments() -> None:\n\t\"\"\"Construct the argument parser and parse the arguments.\"\"\"\n\tap = argparse.ArgumentParser()\n\tap.add_argument(\"-m\", \"--model\",\t\ttype=str, default=\"resNet50\",\t\thelp=\"A model name, choises: resNet50, googleNet\")\n\tap.add_argument(\"-e\", \"--encOut\",\t\ttype=str, default=\"encsOut\",help=\"Path to the output directory for the encoding.\")\n\tap.add_argument(\"-o\", \"--imgOut\",\t\ttype=str, default=\"imgsOut\",help=\"Path to the output directory for the images.\")\n\tap.add_argument(\"-c\", \"--createEncoding\",\t\taction=\"store_true\", default=False,\thelp=\"If create the encoding of the dataset or load it.\")\n\tap.add_argument(\"-n\", \"--nImgs\",\t\ttype=int, default=10,\t\thelp=\"The number of selected people 
for training.\")\n\tap.add_argument(\"-s\", \"--sets\",\t\t\ttype=int, default=5,\t\thelp=\"If NegativePeopleDataset, how many sets of images will be used for training (min 1 and max 9). \")\n\n\targs = vars(ap.parse_args())\n\treturn(args)\n\ndef main() -> None:\n\targs = parseArguments()\n\n\t#load the dataset class\n\tprint(\"loading NegativePeopleDataset.\")\n\tdataset = NegativePeopleDataset(args[\"nImgs\"], nTestDir=args[\"sets\"])\n\n\t#create model\n\tmodel = None\n\tif args[\"model\"] == \"resNet50\":\n\t\tmodel = ResNet50()\n\telif args[\"model\"] == \"googleNet\":\n\t\tmodel = GoogleNet()\n\n\tif dataset is not None and model is not None:\n\t\t#create or load the encodings dictionary\n\t\tencodingsFile = ''.join([args[\"encOut\"], \"/\", args[\"model\"], \"_negativePeople_s\", str(args[\"sets\"]), \"-n\", str(args[\"nImgs\"]), \".pkl\"])\n\t\tif args[\"createEncoding\"]:\n\t\t\tprint(\"[INFO] Creating encodings...\")\n\t\t\tencodings = encodeDataset(model, dataset, encodingsFile)\n\t\telse:\n\t\t\tprint(\"[INFO] Loading encodings...\")\n\t\t\tencodings = pickle.loads(open(encodingsFile, \"rb\").read())\n\n\t\t#pairing people to the space of encodings\n\t\twhile True:\n\t\t\tq=0\n\t\t\ti = input(\"\\nquery number (error will quit):\")\n\t\t\tif i!=\"\":\n\t\t\t\ttry:\n\t\t\t\t\tq = int(i)\n\t\t\t\texcept ValueError:\n\t\t\t\t\tprint(\"The input is not a number...\")\n\t\t\t\t\tbreak\n\t\t\timgBaseName = ''.join([args[\"imgOut\"], args[\"model\"], \"_negativePeople_\"])\n\t\t\tpairPeople(model, dataset, encodings, q, imgBaseName)\n\nif __name__ == \"__main__\":\n\tmain()\n\t\n\n","repo_name":"leopold-lll/aRobotToFollowPeople","sub_path":"createPerson_encodings.py","file_name":"createPerson_encodings.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"6640321178","text":"import datetime\nfrom io import BytesIO\n\nimport requests\nfrom PIL import Image\n\nfrom bot_init import bot\nfrom constants import weather_modes\n\n\ndef get_weather_map(message, mode):\n\n    if mode in ('temp', 'led'):\n        img = requests.get(weather_modes[mode])\n    elif mode == 'satellite':\n        today = datetime.date.today().strftime(\"%Y%m%d\")\n        print(today)\n        now = datetime.datetime.now()\n        n = now.hour - 3\n        img = requests.get(f'{weather_modes[mode]}-{today}1100.jpg')\n        while b\"\"\"\\xff\"\"\" not in img.content:\n            s1 = f'{n:02d}'\n            img = requests.get(f'{weather_modes[mode]}_{today}00_{s1}.png')\n            n -= 1\n    else:\n        today = datetime.date.today().strftime(\"%Y%m%d\")\n        print(today)\n        now = datetime.datetime.now()\n        n = now.hour - 3\n        img = requests.get(f'{weather_modes[mode]}_11111111_11.png')\n        while b\"\"\"\\x89PNG\"\"\" not in img.content:\n            s1 = f'{n:02d}'\n            img = requests.get(f'{weather_modes[mode]}_{today}00_{s1}.png')\n            n -= 1\n    title = None\n    i = Image.open(BytesIO(img.content))\n    try:\n        bot.send_photo(message.from_user.id, i, title)\n    except:\n        bot.reply_to(message, \"Sorry, seems that radar is not working at the moment, try later.\")\n","repo_name":"zzappa/weather_bot","sub_path":"weather_maps.py","file_name":"weather_maps.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"41606240447","text":"input = open(\"input.txt\", \"r\")\n\ndef assignmentInput(input):\n    input = input.readlines()\n    count = 0\n    for line in input:\n        line = line.strip()\n        set1 = line.split(\",\")\n        set1value1 = int(set1[0].split(\"-\")[0])\n        
set1value2 = int(set1[0].split(\"-\")[1])\n        set2value1 = int(set1[1].split(\"-\")[0])\n        set2value2 = int(set1[1].split(\"-\")[1])\n        if set1value1 <= set2value2 and set1value2 >= set2value1:\n            count += 1\n            #print(str(set1value1) + \"-\" + str(set1value2) + \" is fit in by \" + str(set2value1) + \"-\" + str(set2value2))\n        elif set1value1 >= set2value2 and set1value2 <= set2value1:\n            count += 1\n            #print(str(set1value1) + \"-\" + str(set1value2) + \" fits in \" + str(set2value1) + \"-\" + str(set2value2))\n    return count\n\nprint(assignmentInput(input))\n","repo_name":"willrr/AOC-2022","sub_path":"4/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"11147190298","text":"class Solution:\n    \"\"\"\n    You are given an m x n integer array grid. There is a robot initially located\n    at the top-left corner (i.e., grid[0][0]). The robot tries to move to the\n    bottom-right corner (i.e., grid[m - 1][n - 1]). The robot can only move either\n    down or right at any point in time.\n\n    An obstacle and space are marked as 1 or 0 respectively in grid. A path that\n    the robot takes cannot include any square that is an obstacle.\n\n    Return the number of possible unique paths that the robot can take to reach the bottom-right corner.\n\n    The testcases are generated so that the answer will be less than or equal to 2 * 10^9.\n\n    Constraints:\n    - m == obstacleGrid.length\n    - n == obstacleGrid[i].length\n    - 1 <= m, n <= 100\n    - obstacleGrid[i][j] is 0 or 1.\n    \"\"\"\n\n    def uniquePathsWithObstacles(self, obstacle_grid: list[list[int]]) -> int:\n        \"\"\"\n        Ways to get to cell = ways to get to cell above + ways to get to cell to the left.\n        O(n * m) / O(1) time / space complexity\n        \"\"\"\n        # if grid is empty or start/end cell is blocked, there are no unique paths to goal\n        if not obstacle_grid or obstacle_grid[0][0] or obstacle_grid[-1][-1]:\n            return 0\n\n        m, n = len(obstacle_grid), len(obstacle_grid[0])\n\n        # fill grid with unique nr of ways to get to each position, since robot can only move right\n        # or down, to calculate unique paths to a square only unique paths to left and upper neighbor\n        # need to be known\n\n        # set number of ways to reach starting cell to 1\n        obstacle_grid[0][0] = 1\n\n        # initialize first col, if upper neighbor can not be reached, or obstacle on cell, cell can not be reached\n        for i in range(1, m):\n            obstacle_grid[i][0] = obstacle_grid[i - 1][0] * (1 - obstacle_grid[i][0])\n        # initialize first row, if left neighbor can not be reached, or obstacle on cell, cell can not be reached\n        for j in range(1, n):\n            obstacle_grid[0][j] = obstacle_grid[0][j - 1] * (1 - obstacle_grid[0][j])\n\n        # fill in gaps from initialized rows, any cell accessed will still have 0 or 1 depending on if\n        # obstacle or space, all cells above and to the left are already initialized\n        # value to a cell is equal to sum of upper and left neighbor paths if cell does not contain an obstacle, else 0\n        for i in range(1, m):\n            for j in range(1, n):\n                obstacle_grid[i][j] = (\n                    obstacle_grid[i - 1][j] + obstacle_grid[i][j - 1]\n                ) * (1 - obstacle_grid[i][j])\n\n        # return nr of paths to end cell\n        return obstacle_grid[-1][-1]\n","repo_name":"jJup0/LeetCode","sub_path":"Medium/63. Unique Paths II.py","file_name":"63. 
Unique Paths II.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"71377393904","text":"'''\nIdentify potential duplicates, based on surnames\nSecond run: collapse them\n'''\n#import graph_tool as gt\nimport json\nimport pandas as pd\n#import os.path as os\nimport unicodedata\n\ndatafile_in = 'combined_metadata.json'\npotential_dupes_file = 'potential_dupes.csv'\n\n#if not os.exists(potential_dupes_file):\n# Load the data file\nwith open(datafile_in) as readfile:\n\tauthors = json.load(readfile)\n\n# Convert to a Pandas data frame\nauthors_df = pd.DataFrame(authors)\n# Remove some columns we won't need\ndel authors_df['areas']\ndel authors_df['docs']\n# `name` is a column of dicts; break it out\nauthors_df['surname'] = pd.Series([author['name']['surname'] for author in authors])\nauthors_df['given'] = pd.Series([author['name']['given'] for author in authors])\n# Convert surname to ascii, dropping non-ascii characters\n#authors_df['surname_ascii'] = authors_df['surname'].str.encode('ascii', 'ignore')\nauthors_df['surname_ascii'] = pd.Series(\n\t[unicodedata.normalize('NFKD', surname).encode('ascii', 'ignore') \n\t\tfor surname in authors_df['surname']])\n\n# Identify ascii-ed surnames that appear more than once\nsurnames = set(authors_df['surname_ascii'].tolist())\nsurnames = [surname for surname in surnames \n\t\t\tif len(authors_df[authors_df['surname_ascii'] == surname]) > 1]\n# Identify the authors with these ascii-ed surnames\nauthors_df = authors_df[authors_df['surname_ascii'].isin(surnames)]\nauthors_df = authors_df.sort_values('surname_ascii')\n\n# Clean up by removing the column of dicts and ascii-ed surnames\ndel authors_df['name']\n#del authors_df['surname_ascii']\n\n# Write to a CSV for manual checking\n#print(authors_df)\nauthors_df.to_csv(potential_dupes_file, index = False)\n","repo_name":"dhicks/coauthor-network","sub_path":"find_duplicates/find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"5662605785","text":"from urllib3 import PoolManager\nimport urllib3\n\n\nclass Request(object):\n\n    def __init__(self, host, port=\"80\", ssl=\"False\"):\n        self.host = host\n        self.port = port\n        self.http = PoolManager()\n\n        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n        if ssl == \"True\":\n            self.url = \"https://%s:%s\" % (self.host, self.port)\n        else:\n            self.url = \"http://%s:%s\" % (self.host, self.port)\n\n    def __call__(self, uri, proto='GET', data=None):\n        def decorator(callback):\n            def wrapper():\n                # default when the request fails, so callback(resp) is always defined\n                resp = None\n                try:\n                    resp = self.http.request(proto, self.url + uri, data).data\n                except Exception as e:\n                    pass\n                return callback(resp)\n            return wrapper\n        return decorator\n","repo_name":"Cgboal/C2-Framework","sub_path":"app-image/agent/agent/lib/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"91"} +{"seq_id":"40632032735","text":"# Each contact will be rendered as a dictionary\n# Contacts should include first name, last name, full name, company, phone number, e-mail address\n# Contacts can be organised in a dictionary (i.e. 
dictionary of dictionaries) - each key of the outer dictionary will be the name of the contact\n# Add contacts: Go through a list of contact details which is supplied by the user\n# Delete contacts: Search the list for 'name' keys with a particular value and delete\n# Search contacts: Same functionality as for deletion\n# Sort contacts: Sort the keys in the dictionary\n# How can the contacts list be saved for later (i.e. to avoid having to add them all again?)\n\n# Take a case-insensitive string - use this to search the contacts database\n# e.g. 'proquest' - search anything where this is mentioned in company or e-mail address\n# Enable functionality to display a specific contact\n# List first name, last name, e-mail address\n\nimport textwrap\n\n\nclass Contact:\n def __init__(self, first_name, last_name, company, phone, email):\n self.first_name = first_name\n self.last_name = last_name\n self.company = company\n self.phone = phone\n self.email = email\n\n def __str__(self):\n # Removes leading whitespace\n return textwrap.dedent(f\"\"\"\n first name: {self.first_name}\n last name: {self.last_name}\n company: {self.company}\n phone: {self.phone}\n email: {self.email}\n \"\"\")\n\n def __contains__(self, value):\n for var in vars(self):\n if value.casefold() in getattr(self, var).casefold():\n return True\n return False\n\n def save_format(self):\n return \"|\".join([\n self.first_name,\n self.last_name,\n self.company,\n self.phone,\n self.email])\n\n\ndef menu_of_options():\n print(\n \"\"\"\n ===== MENU =====\n 1 - Print the menu\n 2 - Print the contacts list\n 3 - Add a contact\n 4 - Edit a contact by name\n 5 - Delete a contact by name\n 6 - Save the file\n 7 - Sort the contacts\n 8 - Search contacts\n 9 - Print a contact by name\n 0 - Exit\n \"\"\")\n\n\ndef print_all_contacts(contacts):\n \"\"\"\n Given the contacts list\n print all the contacts as a dictionary\"\"\"\n for contact in contacts:\n print(contact)\n\n\ndef add_a_contact(contacts):\n first_name = input(\"Enter the contact's first name: \")\n last_name = input(\"Enter the contact's last name: \")\n company = input(\"Enter the name of the company the contact works for: \")\n phone = input(\"Enter the contact's phone number: \")\n email = input(\"Enter the contact's e-mail address: \")\n contacts.append(\n Contact(\n first_name,\n last_name,\n company,\n phone,\n email\n ))\n save_the_contacts(contacts)\n return contacts\n\n\ndef filter_contact(contacts, field, value):\n filters = [contact for contact in contacts if getattr(contact, field) == value]\n while len(filters) > 1:\n print(f\"More than one contact with last name {value} in the database.\")\n filter_field = input(\n \"\"\"Enter another search term to filter the search.\n Choose from: first_name, company, phone, email :\n \"\"\")\n filter_field_value = input(\n f\"\"\"Enter the value of the {filter_field} you want to search on:\n \"\"\")\n filters = [contact for contact in filters if getattr(contact, filter_field) == filter_field_value]\n return filters\n\n\ndef delete_contact(contacts):\n to_edit = input(\n \"\"\"Enter the contact's last name: \"\"\").strip()\n\n contact_to_delete = filter_contact(contacts, 'last_name', to_edit)[0]\n contacts.remove(contact_to_delete)\n save_the_contacts(contacts)\n\n\ndef edit_contact(contacts):\n to_edit = input(\n \"\"\"Enter the contact's last name: \"\"\").strip()\n\n contact_to_edit = filter_contact(contacts, 'last_name', to_edit)[0]\n\n\n while True:\n selection = input(\n \"\"\"\n Which contact field do you want to edit?\n 
Select from:\n            1 - First Name\n            2 - Last Name\n            3 - Company\n            4 - Phone\n            5 - E-mail\n            : \"\"\"\n        )\n\n        if selection == '1':\n            field_to_edit = \"first_name\"\n            break\n        elif selection == '2':\n            field_to_edit = \"last_name\"\n            break\n        elif selection == '3':\n            field_to_edit = \"company\"\n            break\n        elif selection == '4':\n            field_to_edit = \"phone\"\n            break\n        elif selection == '5':\n            field_to_edit = \"email\"\n            break\n        else:\n            print(\"Please enter a number from the menu.\")\n\n\n    new_value = input(\"Type replacement text for this field: \")\n    setattr(contact_to_edit, field_to_edit, new_value)\n    save_the_contacts(contacts)\n    return contacts\n\n\ndef save_the_contacts(contacts):\n    with open('contacts2_out.txt', 'wt') as output_file:\n        for contact in contacts:\n            output_file.write(f\"{contact.save_format()}\\n\")\n\n\ndef search_contacts(contacts):\n    search_term = input(\"Enter text to search for in contacts: \")\n    search_term_found = False\n    for contact in contacts:\n        if search_term in contact:\n            print(f\"\\nValue {search_term} found in contact for {contact.last_name}\")\n            search_term_found = True\n    if not search_term_found:\n        print(f\"No value {search_term} in contacts database.\")\n\n\ndef print_contact_by_name(contacts):\n    to_find = input(\n        \"\"\"Enter the contact's last name: \"\"\").strip()\n    contact_to_display = filter_contact(contacts, 'last_name', to_find)[0]\n    print(\n        f\"\"\"\n        First Name: {contact_to_display.first_name}\n        Last Name: {contact_to_display.last_name}\n        E-mail: {contact_to_display.email}\n        \"\"\")\n\n\nwith open('contacts2.txt', 'rt') as input_file:\n    contacts = []\n    for line in [line.strip() for line in input_file]:\n        first_name, last_name, company, phone, email = line.split('|')\n        contacts.append(Contact(first_name, last_name, company, phone, email))\n\n\nwhile True:\n    menu_of_options()\n    selection = input(\"Enter a number from the menu: \")\n\n    if selection == '1':\n        menu_of_options()\n    elif selection == '2':\n        print_all_contacts(contacts)\n    elif selection == '3':\n        add_a_contact(contacts)\n    elif selection == '4':\n        edit_contact(contacts)\n    elif selection == '5':\n        delete_contact(contacts)\n    elif selection == '6':\n        save_the_contacts(contacts)\n    elif selection == '7':\n        contacts.sort(key = lambda i: i.last_name)\n    elif selection == '8':\n        search_contacts(contacts)\n    elif selection == '9':\n        print_contact_by_name(contacts)\n    elif selection == '0':\n        break\n    else:\n        print(\"Please enter a number from the menu.\")\n","repo_name":"khind1982/computerkh","sub_path":"contacts_with_class.py","file_name":"contacts_with_class.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"14918285232","text":"import cv2\nimport numpy as np\nfrom pylie import SO3, SE3\n\n\nclass Size:\n    \"\"\"Represents image size\"\"\"\n\n    def __init__(self, width: float, height: float):\n        self._width = width\n        self._height = height\n\n    @classmethod\n    def from_numpy_shape(cls, shape):\n        return cls(*shape[1::-1])\n\n    @property\n    def width(self):\n        return self._width\n\n    @property\n    def height(self):\n        return self._height\n\n\ndef homogeneous(x):\n    \"\"\"Transforms Cartesian column vectors to homogeneous column vectors\"\"\"\n    return np.r_[x, [np.ones(x.shape[1])]]\n\n\ndef hnormalized(x):\n    \"\"\"Transforms homogeneous column vector to Cartesian column vectors\"\"\"\n    return x[:-1] / x[-1]\n\n\nclass PerspectiveCamera:\n    \"\"\"Camera model for the perspective camera\"\"\"\n\n    def __init__(self,\n                 calibration_matrix: 
np.ndarray,\n                 distortion_coeffs: np.ndarray,\n                 image_size: Size):\n        \"\"\"Constructs the camera model.\n\n        :param calibration_matrix: The intrinsic calibration matrix.\n        :param distortion_coeffs: Distortion coefficients on the form [k1, k2, p1, p2, k3].\n        :param image_size: Size of image for this calibration.\n        \"\"\"\n        self._calibration_matrix = calibration_matrix\n        self._calibration_matrix_inv = np.linalg.inv(calibration_matrix)\n        self._distortion_coeffs = distortion_coeffs\n        self._image_size = image_size\n\n    def undistort_image(self, distorted_image):\n        \"\"\"Undistorts an image corresponding to the camera model.\n\n        :param distorted_image: The original, distorted image.\n        :returns: The undistorted image.\n        \"\"\"\n\n        return cv2.undistort(distorted_image, self._calibration_matrix, self._distortion_coeffs)\n\n    def pixel_to_normalised(self, point_pixel):\n        \"\"\"Transform a pixel coordinate to normalised coordinates\n\n        :param point_pixel: The 2D point in the image given in pixels.\n        \"\"\"\n\n        if point_pixel.ndim == 1:\n            # Convert to column vector.\n            point_pixel = point_pixel[:, np.newaxis]\n\n        return self._calibration_matrix_inv @ homogeneous(point_pixel)\n\n    @property\n    def calibration_matrix(self):\n        \"\"\"The intrinsic calibration matrix K.\"\"\"\n        return self._calibration_matrix\n\n    @property\n    def calibration_matrix_inv(self):\n        \"\"\"The inverse calibration matrix K^{-1}.\"\"\"\n        return self._calibration_matrix_inv\n\n    @property\n    def distortion_coeffs(self):\n        \"\"\"The distortion coefficients on the form [k1, k2, p1, p2, k3].\"\"\"\n        return self._distortion_coeffs\n\n    @property\n    def image_size(self):\n        \"\"\"The image size\"\"\"\n        return self._image_size\n\n    @property\n    def principal_point(self):\n        \"\"\"The principal point (p_u, p_v)\"\"\"\n        return self._calibration_matrix[0, 2], self._calibration_matrix[1, 2]\n\n    @property\n    def focal_lengths(self):\n        \"\"\"The focal lengths (f_u, f_v)\"\"\"\n        return self._calibration_matrix[0, 0], self._calibration_matrix[1, 1]\n\n    @staticmethod\n    def looks_at_pose(camera_pos_w: np.ndarray, target_pos_w: np.ndarray, up_vector_w: np.ndarray):\n        \"\"\"Computes the pose for a camera that looks at a given point.\"\"\"\n        cam_to_target_w = target_pos_w - camera_pos_w\n        cam_z_w = cam_to_target_w.flatten() / np.linalg.norm(cam_to_target_w)\n\n        cam_to_right_w = np.cross(-up_vector_w.flatten(), cam_z_w)\n        cam_x_w = cam_to_right_w / np.linalg.norm(cam_to_right_w)\n\n        cam_y_w = np.cross(cam_z_w, cam_x_w)\n\n        return SE3((SO3(np.vstack((cam_x_w, cam_y_w, cam_z_w)).T), camera_pos_w))\n\n    @staticmethod\n    def jac_project_world_to_normalised_wrt_pose_w_c(pose_c_w: SE3, x_w: np.ndarray):\n        \"\"\"Computes the Jacobian for the projection of a world point to normalised coordinates wrt camera pose\"\"\"\n        x_c = (pose_c_w * x_w).flatten()\n\n        d = 1 / x_c[-1]\n        xn = d * x_c\n\n        return np.array([[-d, 0, d * xn[0], xn[0] * xn[1], -1 - xn[0] ** 2, xn[1]],\n                         [0, -d, d * xn[1], 1 + xn[1] ** 2, -xn[0] * xn[1], -xn[0]]])\n\n    @staticmethod\n    def project_to_normalised_3d(x_c: np.ndarray):\n        \"\"\"Projects a 3D point in the camera coordinate system onto the 3D normalised image plane\"\"\"\n        return x_c / x_c[-1]\n\n    @classmethod\n    def project_to_normalised(cls, x_c: np.ndarray):\n        \"\"\"Projects a 3D point in the camera coordinate system onto the 2D normalised image plane\"\"\"\n        xn = cls.project_to_normalised_3d(x_c)\n        return xn[:2]\n\n    @classmethod\n    def reprojection_error_normalised(cls, x_c: np.ndarray, measured_x_n: np.ndarray):\n        \"\"\"Computes the reprojection error in 
normalised image coordinates\"\"\"\n return measured_x_n[:2] - cls.project_to_normalised(x_c)\n\n\ndef retain_best(keypoints, num_to_keep):\n \"\"\"Retains the given number of keypoints with highest response\"\"\"\n num_to_keep = np.minimum(num_to_keep, len(keypoints))\n best = np.argpartition([p.response for p in keypoints], -num_to_keep)[-num_to_keep:]\n return best\n\n\ndef extract_good_ratio_matches(matches, max_ratio):\n \"\"\"\n Extracts a set of good matches according to the ratio test.\n\n :param matches: Input set of matches, the best and the second best match for each putative correspondence.\n :param max_ratio: Maximum acceptable ratio between the best and the next best match.\n :return: The set of matches that pass the ratio test.\n \"\"\"\n if len(matches) == 0:\n return ()\n\n matches_arr = np.asarray(matches)\n distances = np.array([m.distance for m in matches_arr.ravel()]).reshape(matches_arr.shape)\n good = distances[:, 0] < distances[:, 1] * max_ratio\n\n # Return a tuple of good DMatch objects.\n return tuple(matches_arr[good, 0])\n\n\nclass PlaneReference:\n \"\"\"Represents the transformation between world image pixels and 3D world plane coordinates\"\"\"\n\n def __init__(\n self,\n image_size: Size,\n scene_size: Size,\n origin=np.array([0.0, 0.0, 0.0]),\n x_dir=np.array([1.0, 0.0, 0.0]),\n y_dir=np.array([0.0, 1.0, 0.0])\n ):\n \"\"\"\n :param image_size: as output from image.shape\n :param scene_size:\n :param origin:\n :param x_dir:\n :param y_dir:\n \"\"\"\n self._origin = np.asarray(origin)\n self._x_dir = np.asarray(x_dir)\n self._y_dir = np.asarray(y_dir)\n\n self._units_per_pixel_x = scene_size.width / image_size.width\n self._units_per_pixel_y = scene_size.height / image_size.height\n\n def pixel_to_world(self, pixel):\n \"\"\"Computes the corresponding world coordinate for a \"world image\" pixel\"\"\"\n pixel = np.atleast_2d(pixel)\n return \\\n self._origin \\\n + np.einsum('i,j->ij', self._units_per_pixel_x * pixel[:, 0], self._x_dir) \\\n + np.einsum('i,j->ij', self._units_per_pixel_y * pixel[:, 1], self._y_dir)\n\n\nclass PlaneWorldModel:\n \"\"\"Represents a planar world.\"\"\"\n\n def __init__(self, world_image: np.array, world_size: Size, grid_length: float):\n \"\"\"\n Constructs the world model.\n\n :param world_image: The world map image.\n :param world_size: The physical size of the world corresponding to the image in meters.\n :param grid_length: Length of the grid cells in the world image in meters.\n \"\"\"\n self._world_image = world_image\n self._world_size = world_size\n self._grid_length = grid_length\n self._max_num_points = 1000\n self._max_ratio = 0.8\n\n self._construct_world()\n\n @property\n def world_image(self):\n return self._world_image\n\n @property\n def world_size(self):\n return self._world_size\n\n @property\n def grid_length(self):\n return self._grid_length\n\n def _construct_world(self):\n # Convert to gray scale.\n gray_img = cv2.cvtColor(self._world_image, cv2.COLOR_BGR2GRAY)\n\n # Set up objects for detection, description and matching.\n self._detector = cv2.ORB_create(nfeatures=1000)\n self._desc_extractor = cv2.ORB_create()\n self._matcher = cv2.BFMatcher_create(self._desc_extractor.defaultNorm())\n\n # Detect keypoints\n keypoints = np.asarray(self._detector.detect(gray_img))\n best = retain_best(keypoints, self._max_num_points)\n keypoints = keypoints[best]\n\n # Compute descriptors for each keypoint.\n keypoints, new_descriptors = self._desc_extractor.compute(gray_img, keypoints)\n\n # Store points and 
descriptors.\n ref = PlaneReference(\n Size.from_numpy_shape(self._world_image.shape),\n self._world_size,\n np.array([-0.5 * self._world_size.width, 0.5 * self._world_size.height, 0.0]),\n np.array([1.0, 0.0, 0.0]),\n np.array([0.0, -1., 0.0]),\n )\n\n pixels = np.array([k.pt for k in keypoints])\n self._world_points = ref.pixel_to_world(pixels)\n self._descriptors = new_descriptors\n\n def find_correspondences(self, frame: np.ndarray):\n \"\"\"Computes correspondences between the world image and a given frame\"\"\"\n\n # Detect keypoints\n frame_keypoints = self._detector.detect(frame)\n\n # Compute descriptors for each keypoint.\n frame_keypoints, frame_descriptors = self._desc_extractor.compute(frame, frame_keypoints)\n\n # Do matching step and ratio test to remove bad points.\n matches = self._matcher.knnMatch(frame_descriptors, self._descriptors, k=2)\n good_matches = extract_good_ratio_matches(matches, max_ratio=self._max_ratio)\n\n frame_idx = [m.queryIdx for m in good_matches]\n world_descriptor_idx = [m.trainIdx for m in good_matches]\n\n # # Extract good 2d-3d matches.\n image_points = np.array([k.pt for k in np.asarray(frame_keypoints)[frame_idx]])\n world_points = self._world_points[world_descriptor_idx]\n\n return image_points, world_points\n","repo_name":"tek5030/lab-pose-estimation-py","sub_path":"common_lab_utils.py","file_name":"common_lab_utils.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"39652706112","text":"#!/usr/bin/env python\n\n# Author: Dan Walsh \nfrom distutils.core import setup\nimport Atomic as _Atomic\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"atomic\", scripts=[\"atomic\", \"atomic_dbus.py\"],\n version=_Atomic.__version__,\n author=_Atomic.__author__,\n author_email=_Atomic.__author_email__,\n packages=[\"Atomic\", \"Atomic/backends\", \"Atomic/objects\"],\n data_files=[('/etc/dbus-1/system.d/', [\"org.atomic.conf\"]),\n ('/usr/share/dbus-1/system-services', [\"org.atomic.service\"]),\n ('/usr/share/polkit-1/actions/', [\"org.atomic.policy\"]),\n (\"/usr/share/bash-completion/completions/\",\n [\"bash/atomic\"])]\n)\n","repo_name":"projectatomic/atomic","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":521,"dataset":"github-code","pt":"91"} +{"seq_id":"39337822625","text":"'''Feed plugin. \u0002!search google searchterm\u0002 will search google with searchterm and return the first result. \u0002@wikipedia searchterm\u0002 will search wikipedia and return the first paragraph of the closest match. \u0002!search imdb movie title\u0002 searches for movie title. 
\u0002!search noslang word\u0002 will try to translate word.'''\nimport eyercbot\nimport eyercbot.httplib2 as httplib2\nimport urllib.request\nfrom urllib.parse import urlencode\n\n#import datetime\nimport re\nimport xml.etree.ElementTree as etree\n\n\n# Bold: \u0002\n\nHAS_CONFIG = True\nCONFIG_VERSION = 3\nconfig = {'feeds':{'example name': {'url': 'http://nowhere.com/rss', 'servers': {'exampleServer': ['#example']},'lastid': '0'}},\n 'frequency':'*/30 * * * *', 'length': 1000}\n\ndefault_headers = {\n 'User-agent': 'Mozilla/5.0 (compatible; utils.web python module)'\n }\n \n# Normally cache should be used, however some feeds do not update their etag\n# I'm looking at you civfanatics!\nbrowser = httplib2.Http()\n\ndef auto_feed(*args):\n for feed in eyercbot.config['plugin_config']['feed']['feeds']:\n for server in eyercbot.config['plugin_config']['feed']['feeds'][feed]['servers']:\n for channel in eyercbot.config['plugin_config']['feed']['feeds'][feed]['servers'][server]:\n get_feed(server, '', channel, feed, auto=True)\n\ndef get_feed(server, user, target, message, auto=False):\n if not message:\n eyercbot.send('sendMsg', server, user, target, 'Please enter a feed name')\n return\n key = message\n if key not in eyercbot.config['plugin_config']['feed']['feeds']:\n eyercbot.send('sendMsg', server, user, target, 'Feed does not exist.')\n return\n response, content = browser.request(eyercbot.config['plugin_config']['feed']['feeds'][key]['url'])\n data = content.decode(response['content-type'].split('=')[1])\n tree = etree.fromstring(data)\n channel = tree.getchildren()[0]\n feedtitle = channel.find('title').text\n feedlink = channel.find('link').text\n child = channel.find('item')\n title = child.find('title').text\n link = child.find('link').text\n guid = child.find('guid').text\n if eyercbot.config['plugin_config']['feed']['feeds'][key]['lastid'] == guid and auto:\n return\n content = eyercbot.bbcode2irc(eyercbot.html2irc(child.find('{http://purl.org/rss/1.0/modules/content/}encoded').text))\n response = 'From: ' + feedtitle\n eyercbot.send('sendMsg', server, user, target, response)\n response = '\u0002' + title + '\u0002: ' + content\n #print(\"response:\",response[0:400])\n eyercbot.send('sendMsg', server, user, target, response[:eyercbot.config['plugin_config']['feed']['length']])\n eyercbot.send('sendMsg', server, user, target, link)\n eyercbot.config['plugin_config']['feed']['feeds'][key]\n eyercbot.config['plugin_config']['feed']['feeds'][key]['lastid'] = guid\n\ndef list_feeds(server, user, target, message):\n eyercbot.send('sendMsg', server, user, target, str(list(eyercbot.config['plugin_config']['feed']['feeds'].keys())))\n \n\n# The function maps the function to the input\n# These need to be unique names, otherwise undesired plugin may be called!\nalias_map = {\"get feed\": get_feed, 'list feeds': list_feeds}\n \n\nif 'feed' in eyercbot.config['plugin_config']:\n minute, hour, day, month, day_of_week = eyercbot.config['plugin_config']['feed']['frequency'].split(' ')\n eyercbot.scheduler.add_cron_job(auto_feed, second='0', month=month, day=day, hour=hour, minute=minute, day_of_week=day_of_week)\n# eyercbot.scheduler.add(\"Feed auto update\", \n# datetime.datetime.utcnow() + datetime.timedelta(days=int(days), hours=int(hours), minutes=int(minutes), seconds=int(seconds)), \n# eyercbot.config['plugin_config']['feed']['frequency'], auto_feed)\n \n''' \nshamelessly taken from the rssnews.tcl for eggdrop\n\n" \\x22 ' \\x27 & \\x26 < \\x3C\n > \\x3E \\x20 ¡ \\xA1 ¤ \\xA4\n ¢ \\xA2 £ 
\\xA3 ¥ \\xA5 ¦ \\xA6\n § \\xA7 ¨ \\xA8 © \\xA9 ª \\xAA\n « \\xAB ¬ \\xAC ­ \\xAD ® \\xAE\n ¯ \\xAF ° \\xB0 ± \\xB1 ² \\xB2\n ³ \\xB3 ´ \\xB4 µ \\xB5 ¶ \\xB6\n · \\xB7 ¸ \\xB8 ¹ \\xB9 º \\xBA\n » \\xBB ¼ \\xBC ½ \\xBD ¾ \\xBE\n ¿ \\xBF × \\xD7 ÷ \\xF7 À \\xC0\n Á \\xC1  \\xC2 à \\xC3 Ä \\xC4\n Å \\xC5 Æ \\xC6 Ç \\xC7 È \\xC8\n É \\xC9 Ê \\xCA Ë \\xCB Ì \\xCC\n Í \\xCD Î \\xCE Ï \\xCF Ð \\xD0\n Ñ \\xD1 Ò \\xD2 Ó \\xD3 Ô \\xD4\n Õ \\xD5 Ö \\xD6 Ø \\xD8 Ù \\xD9\n Ú \\xDA Û \\xDB Ü \\xDC Ý \\xDD\n Þ \\xDE ß \\xDF à \\xE0 á \\xE1\n â \\xE2 ã \\xE3 ä \\xE4 å \\xE5\n æ \\xE6 ç \\xE7 è \\xE8 é \\xE9\n ê \\xEA ë \\xEB ì \\xEC í \\xED\n î \\xEE ï \\xEF ð \\xF0 ñ \\xF1\n ò \\xF2 ó \\xF3 ô \\xF4 õ \\xF5\n ö \\xF6 ø \\xF8 ù \\xF9 ú \\xFA\n û \\xFB ü \\xFC ý \\xFD þ \\xFE\n ÿ \\xFF\n'''\n","repo_name":"BGCX067/eyercbot-svn-to-git","sub_path":"trunk/eyercbot/plugins/feed/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"33416223566","text":"# Given the root of a binary tree, return the postorder traversal of its nodes' values.\n\nclass TreeNode(object):\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\ndef postorderTraversal(root):\n stack = [root]\n visit = [False]\n result = []\n\n while stack:\n current, visited = stack.pop(), visit.pop()\n\n if current:\n if visited:\n result.append(current.val)\n else:\n stack.append(current)\n visit.append(True)\n stack.append(current.right)\n visit.append(False)\n stack.append(current.left)\n visit.append(False)\n\n return result\n","repo_name":"Primike/Algorithms","sub_path":"python/Neetcode/BinaryTrees/Easy/145.BinaryTreePostorder.py","file_name":"145.BinaryTreePostorder.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"33535253296","text":"import re\nimport time\nimport os\nimport random\n\nfrom selenium import webdriver\n\n# Package for the model speech\nimport playsound\nfrom gtts import gTTS\n# Package for voice input and translating into file format\nimport openai\nimport sounddevice as sd\nimport soundfile as sf\n\n# Representing whether the program is running or not\ncurrent_status = 0\n# Where we execute this program\ndriver = webdriver.Chrome('') # Use own path for testing\n\n# Generating voice responses\ndef model_speak(voice_data):\n gtts = gTTS(text=voice_data, lang='en')\n r = random.randint(1,100)\n audio_file = 'audio_' +str(r)+'.mp3'\n gtts.save(audio_file)\n print(voice_data)\n playsound.playsound(audio_file)\n os.remove(audio_file)\n\nmodel_speak(\"What can I help you?\")\n\n# setting for voice input\nduration = 3 # recording duration in seconds\nsample_rate = 44100 # audio sample rate\nchannels = 1 # number of audio channels\n\n# Take input(voice) and translate into mp3 format\ndef user_speak(langdetect=None):\n audio = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=channels)\n sd.wait()\n r = random.randint(1, 100)\n\n # Save audio to MP3 file\n filename = \"audio_\" +str(r)+\".mp3\"\n sf.write(filename, audio, sample_rate)\n\n # Transcribe audio using OpenAI API\n openai.api_key = \"\" # Use your own key\n\n with open(filename, \"rb\") as f:\n transcript = openai.Audio.transcribe(\"whisper-1\", f)\n \n os.remove(filename)\n return transcript['text'].lower()\n\n\ndef respond(voice_data):\n if re.match(\"open\\sgoogle\", 
voice_data):\n        driver.get('http://google.com')\n        model_speak(\"Is there anything more I can do for you?\")\n    elif re.match(\"open\\syoutube\", voice_data):\n        driver.get('http://youtube.com')\n        model_speak(\"Is there anything more I can do for you?\")\n    elif re.match(\"stop\", voice_data):\n        time.sleep(10)\n    elif re.match(r\"^search\\s+\", voice_data):\n        model_speak(\"Identifying speech...\")\n        words = voice_data.split()\n        # join the query words with '+' so multi-word searches keep their separators in the URL\n        search_string = '+'.join(words[1:])\n        url = 'http://google.com/search?q=' + search_string\n        driver.get(url)\n        model_speak(\"Is there anything more I can do for you?\")\n    elif re.match(r\"create\\snew\\stab\", voice_data):\n        driver.execute_script(\"window.open('');\")\n        model_speak(\"Is there anything more I can do for you?\")\n    elif re.match(r\"switch\\stab\", voice_data):\n        num_of_tabs = len(driver.window_handles)\n        current_tab = 0\n        for i in range(num_of_tabs):\n            if driver.window_handles[i] == driver.current_window_handle:\n                if i != num_of_tabs - 1:\n                    current_tab = i + 1\n                break\n        driver.switch_to.window(driver.window_handles[current_tab])\n        model_speak(\"Is there anything more I can do for you?\")\n    elif re.match(r\"close\\stab\", voice_data):\n        driver.close()\n        model_speak(\"Is there anything more I can do for you?\")\n    elif re.match(r\"go\\sback\", voice_data):\n        driver.back()\n        model_speak(\"Is there anything more I can do for you?\")\n    elif re.match(r\"go\\sforward\", voice_data):\n        driver.forward()\n    elif re.match(r\"exit\", voice_data):\n        model_speak(\"See you next time sir\")\n        driver.quit()\n    else:\n        model_speak(\"Invalid command, please try again\")\n        exit()\n\n\nwhile True:\n\n    voice_data = user_speak()\n    print(voice_data)\n    respond(voice_data)\n\n","repo_name":"Wonchang0314/python_voice_assistant","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"16022645824","text":"#!/usr/bin/python\n# coding: utf-8\n\nclass MatrixMethod(object):\n\tdef __init__(self, matrix):\n\t\tself.matrix = matrix\n\t\tfor i, string in enumerate(self.matrix):\n\t\t\tself.matrix[i] = [int(digit) for digit in string]\n\n\t\tself.cols = len(self.matrix[0])\n\t\tself.rows = len(self.matrix)\n\n\t\tself.code = []\n\n\tdef coding(self, information):\n\t\tinformation = [int(i) for i in information]\n\t\tredundantElements = [0 for _ in xrange(self.cols)]\n\t\t\n\t\tfor j in xrange(self.cols):\n\t\t\tfor i in xrange(self.rows):\n\t\t\t\tif self.matrix[i][j]:\n\t\t\t\t\tredundantElements[j] ^= information[i]\n\n\t\tself.code = information + redundantElements\n\t\treturn self.code\n\n\tdef decoding(self, errors):\n\t\terrors = [int(i) for i in errors]\n\t\tsyndrome = [0 for _ in xrange(self.cols)]\n\n\t\tcheckMatrix = self.matrix + generateIdentityMatrix(self.cols)\n\t\tcode = self.makeErrors(errors)\n\n\t\tfor j in xrange(self.cols):\n\t\t\tfor i in xrange(self.cols + self.rows):\n\t\t\t\tif code[i]:\n\t\t\t\t\tsyndrome[j] ^= checkMatrix[i][j]\n\n\t\terror = 0\n\t\tfor element in syndrome:\n\t\t\tif element:\n\t\t\t\terror = 1\n\t\t\t\tbreak\n\n\t\tfixed = list(code)\n\t\tif not error:\n\t\t\terrorInfo = 'No errors'\n\t\telse:\t\t\n\t\t\tif syndrome in checkMatrix:\n\t\t\t\terrorIndex = checkMatrix.index(syndrome)\n\t\t\t\terrorInfo = '1 error: in element %d' %(errorIndex + 1)\n\n\t\t\t\tfixed[errorIndex] ^= 1\n\t\t\telse:\n\t\t\t\tindexMatrix, checkMatrix = 
generateCheckMatrixForTwoErrors(checkMatrix)\n\t\t\t\tif syndrome in checkMatrix:\n\t\t\t\t\terrorIndex = indexMatrix[checkMatrix.index(syndrome)]\n\t\t\t\t\terrorInfo = '2 errors: in elements %d and %d' %(errorIndex[0] + 1, errorIndex[1] + 1)\n\n\t\t\t\t\tfixed[errorIndex[0]] ^= 1\n\t\t\t\t\tfixed[errorIndex[1]] ^= 1\n\t\t\t\telse:\n\t\t\t\t\terrorInfo = 'More than 2 errors'\n\n\t\treturn (errorInfo, code, fixed)\n\n\tdef makeErrors(self, errors):\n\t\tcode = list(self.code)\n\n\t\tfor i, error in enumerate(errors, 0):\n\t\t\tif error:\n\t\t\t\tcode[i] ^= 1\n\n\t\treturn code\n\ndef generateIdentityMatrix(n):\n\tif n < 1:\n\t\traise ValueError('n must be >= 1')\n\n\tmatrix = [[0 for _ in xrange(n)] for _ in xrange(n)]\n\n\tfor i in xrange(n):\n\t\tfor j in xrange(n):\n\t\t\tif i == j:\n\t\t\t\tmatrix[i][j] = 1\n\treturn matrix\n\ndef generateCheckMatrixForTwoErrors(checkMatrix):\n\tdef xorLists(a, b):\n\t\tif len(a) != len(b):\n\t\t\traise ValueError('Length of lists must be equal')\n\t\t\n\t\tlength = len(a)\n\n\t\tresult = [0 for _ in xrange(length)]\n\t\tfor i in xrange(length):\n\t\t\tresult[i] = a[i] ^ b[i]\n\n\t\treturn result\n\n\tcols = len(checkMatrix[0])\n\trows = len(checkMatrix)\n\n\tmatrix = [[0 for _ in xrange(cols)] for _ in xrange(rows * (rows - 1))]\n\tindexMatrix = ['' for _ in xrange(rows * (rows - 1))]\n\n\tcur = 0\n\tfor i in xrange(rows):\n\t\tfor j in xrange(1, rows):\n\t\t\tindexMatrix[cur] = (i, j)\n\t\t\tmatrix[cur] = xorLists(checkMatrix[i], checkMatrix[j])\n\t\t\tcur += 1\n\n\treturn (indexMatrix, matrix)","repo_name":"rozetko/otpds","sub_path":"coding.py","file_name":"coding.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"588942907","text":"import speech_recognition as sr\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport cv2\r\nimport os\r\nfrom PIL import Image, ImageTk\r\nfrom itertools import count\r\nimport tkinter as tk\r\nimport string\r\n\r\nisl_gif = ['account closing', 'account currency', 'address', 'afternoon', 'ahemdabad', 'all', 'annually', 'any questions',\r\n           'any', 'anyone', 'are you angry', 'are you busy', 'are you hungry', 'assam', 'assets', 'audit', 'august', \r\n           'automatic', 'balance', 'banana', 'banaras', 'banglore', 'be careful', 'black ice', 'blizzard', 'breezy',\r\n           'bridge', 'card', 'cash', 'cat', 'christmas', 'church', 'cilinic', 'clear skies', 'cold', 'compound interest',\r\n           'credit', 'current account', 'dasara', 'day', 'december', 'default', 'degrees', 'did you finish homework', 'direct', \r\n           'do you have money', 'do you want something to drink', 'do you watch tv', 'dont worry', 'dry', 'dusk', 'dust storm',\r\n           'each', 'early', 'either', 'else', 'evening', 'every monday', 'every tuesday', 'every two months', 'every two weeks',\r\n           'every two years', 'every wednesday', 'every week', 'everyday', 'everyone', 'everything', 'february', 'few', 'flower is beautiful',\r\n           'fraud', 'friday', 'fund', 'good afternoon', 'good morning', 'good question', 'grapes', 'hail', 'he', 'heat wave', 'heavy rain',\r\n           'hello', 'her', 'herself', 'him', 'himself', 'hindu', 'hot', 'hour', 'humid', 'hyderabad', 'i am a clerk', 'i am fine', 'i am sorry',\r\n           'i am thinking', 'i am tired', 'i go to a theatre', 'i had to say something but i forgot', 'i like pink colour', 'i love to shop',\r\n           'i', 'ice', 'interest rate', 'investment', 'it', 'itself', 'january', 'job', 'july', 'june', 'karnataka', 'kerala', 'krishna', 'late night',\r\n           'late', 'lend', 'lets go for 
lunch', 'lightning', 'liquidation', 'loan', 'mango', 'many', 'march', 'may', 'me', 'midnight', 'mile', 'monday',\r\n 'money', 'monthly', 'morning dew', 'morning', 'mortgage', 'mumbai', 'my', 'myself', 'nagpur', 'net profit', 'nice to meet you', 'night', 'none',\r\n 'noon', 'nothing', 'november', 'open the door', 'our', 'ourselves', 'overdraft', 'pakistan', 'password', 'paying slip', 'please call me later',\r\n 'please wait for sometime', 'police station', 'post office', 'pouring rain', 'principal', 'pune', 'punjab', 'rainbow', 'refinance', 'reserve fund', \r\n 'saturday', 'scattered rain', 'scattered snow', 'second', 'security collateral', 'security', 'shall i help you', 'shall we go together tommorow', \r\n 'share equity', 'shop', 'sign language interpreter', 'sit down', 'slippery walking', 'slippery', 'smog', 'snow', 'some', 'somebody', 'someone', \r\n 'something', 'soon', 'spring', 'stand up', 'statement of account', 'statement', 'stock', 'summer', 'sun', 'sunday', 'sunrise', 'sunset', 'take care', 'teller', 'temperature', 'temple', 'their', 'them', 'there was traffic jam', 'thunder', 'thursday', 'time', 'today', 'toilet', 'tomato', 'transaction', 'transfer', 'trust', 'tuesday', 'us', 'usa', 'village', 'visitor', 'we', 'weather', 'wednesday', 'weekend', 'what are you doing', 'what is the problem', \"what is today's date\", 'what is your father do', 'what is your mobile number', 'what is your name', 'what', 'whats up', 'where is the bathroom', 'where is the police station', 'which', 'who', 'windy', 'wintering', 'working hours', 'you are wrong', 'yourself']\r\narr = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',\r\n 'w', 'x', 'y', 'z']\r\nimport os\r\n\r\ndef user_input(input_text): \r\n try:\r\n a = input_text.lower()\r\n for c in string.punctuation:\r\n a = a.replace(c, \"\")\r\n if (a.lower() in isl_gif):\r\n class ImageLabel(tk.Label):\r\n \"\"\"a label that displays images, and plays them if they are gifs\"\"\"\r\n def load(self, im):\r\n if isinstance(im, str):\r\n im = Image.open(im)\r\n self.loc = 0\r\n self.frames = []\r\n try:\r\n for i in count(1):\r\n self.frames.append(ImageTk.PhotoImage(im.copy()))\r\n im.seek(i)\r\n except EOFError:\r\n pass\r\n try:\r\n self.delay = im.info['duration']\r\n except:\r\n self.delay = 100\r\n\r\n if len(self.frames) == 1:\r\n self.config(image=self.frames[0])\r\n else:\r\n self.next_frame()\r\n def unload(self):\r\n self.config(image=None)\r\n self.frames = None\r\n\r\n def next_frame(self):\r\n if self.frames:\r\n self.loc += 1\r\n self.loc %= len(self.frames)\r\n self.config(image=self.frames[self.loc])\r\n self.after(self.delay, self.next_frame)\r\n root = tk.Tk()\r\n lbl = ImageLabel(root)\r\n lbl.pack()\r\n gif_path = 'C:/Users/DELL/Downloads/Tech-It-Easy-main/Tech-It-Easy-main/download/{}.gif'.format(a.lower())\r\n if os.path.exists(gif_path):\r\n lbl.load(gif_path)\r\n root.mainloop()\r\n else:\r\n print(\"GIF file not found.\")\r\n else:\r\n word = input_text.split()\r\n for i in word:\r\n if (i.lower() in isl_gif):\r\n gif_path = 'C:/Users/DELL/Downloads/Tech-It-Easy-main/Tech-It-Easy-main/download/{}.gif'.format(i.lower())\r\n if os.path.exists(gif_path):\r\n im = Image.open(gif_path)\r\n im.show()\r\n else:\r\n print(\"GIF file not found.\")\r\n else:\r\n lst=[]\r\n a = i\r\n for letter in a:\r\n lst.append(letter)\r\n for k in lst:\r\n if(k.lower() in arr):\r\n img_path = 'C:/Users/DELL/Downloads/Tech-It-Easy-main/Tech-It-Easy-main/letters/{}.jpg'.format(k)\r\n if 
os.path.exists(img_path):\r\n im = Image.open(img_path)\r\n im.show()\r\n else:\r\n print(\"Image file not found.\")\r\n else :\r\n print(\"Word Not Found\") \r\n except Exception as e:\r\n print(f\"Error: {e}\")\r\n\r\npath_to_file = \"Speech to sign.txt\"\r\n \r\ndef speech_to_sign(path_to_file):\r\n with open(path_to_file, \"r\") as file:\r\n content = file.read() \r\n return user_input(content)","repo_name":"TIE2023/Update_TIE","sub_path":"Misc.py","file_name":"Misc.py","file_ext":"py","file_size_in_byte":7051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"6797643137","text":"try:\r\n input = raw_input\r\nexcept NameError:\r\n pass\r\n\r\nimport sys\r\nfrom pickle import load\r\n# Load dictionary with courses and their pre-requisites and co-requisites\r\ndict = load(open(\"dictCoursesPreCoReqs.p\", \"rb\"))\r\n\r\nprint (\"Welcome! This tool helps you find out which courses require the given course as a pre-req or co-req. I hope it is useful.\")\r\n\r\ndef interaction():\r\n\tprint (\"\")\r\n\r\n\tcourse_input = input(\"Which course would you like to look up? (Enter x to exit): \").upper()\r\n\tif (course_input != \"X\"):\r\n\t\tprint(\"\")\r\n\t\tpre_co_req_for = []\r\n\r\n\t\t# If the course that the user provided is not in the loaded dictionary, ask again\r\n\t\tif (not(course_input in dict)):\r\n\t\t\tprint (\"That is not a valid course\")\r\n\t\t\tinteraction()\r\n\t\t# Else, search the courses for which the provided course is a pre-requisite or co-requisite \r\n\t\t# and add them to a list.\r\n\t\telse:\r\n\t\t\tfor course, pre_co_reqs in dict.items():\r\n\t\t\t\tif (course_input in pre_co_reqs):\r\n\t\t\t\t\tpre_co_req_for.append(course)\r\n\r\n\t\t\tsys.stdout.write(course_input + \" is a pre-req or co-req for:\")\r\n\t\t\tprint(\"\")\r\n\t\t\t\r\n\t\t\tpre_co_req_for.sort()\r\n\t\t\tfor p in pre_co_req_for:\r\n\t\t\t\tsys.stdout.write(\"| \" + str(p) + \" |\")\r\n\t\t\tprint(\"\")\r\n\t\t\tprint(\"\")\r\n\r\n\t\t\tinteraction()\r\n\r\ninteraction()\r\n\r\n","repo_name":"rupertotorres1/UBCPreReqTool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"33669341803","text":"#!/Users/kchen/miniconda3/bin/python\n# Author: Kai Chen\n# Institute: INS, SJTU\n# Coarse grain causal analysis across cortical regions\n# Plot AUC vs. 
answer threshold.\n\nimport time\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rcParams['font.size']=20\nplt.rcParams['axes.labelsize']=25\nfrom fcpy.roc import scan_auc_threshold\nfrom fcpy.plot import gen_auc_threshold_figure\nfrom fcpy.utils import print_log\nfrom fcpy.core import EcogTDMI\nfrom argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\narg_default = {\n 'path': 'tdmi_snr_analysis/',\n 'is_interarea': False,\n}\nparser = ArgumentParser(\n prog='GC plot_auc_threshold',\n description = \"Generate figure for analysis of causality.\",\n formatter_class=ArgumentDefaultsHelpFormatter\n)\nparser.add_argument(\n 'path', \n default=arg_default['path'], \n nargs='?',\n type = str, \n help = \"path of working directory.\"\n)\nargs = parser.parse_args()\n\nstart = time.time()\n# Load SC and FC data\n# ==================================================\n# no_snr_mask\ndata_no_snr = EcogTDMI('data/')\ndata_no_snr.init_data()\nsc_no_snr, fc_no_snr = data_no_snr.get_sc_fc('cg')\n\ndata = EcogTDMI('data/')\ndata.init_data(args.path)\nsc, fc = data.get_sc_fc('cg')\n# ==================================================\n\nw_thresholds = np.logspace(-6, 0, num=7, base=10)\naucs = {}\naucs_no_snr = {}\nfor band in data.filters:\n aucs_no_snr[band], _ = scan_auc_threshold(fc_no_snr[band], sc_no_snr[band], w_thresholds)\n aucs[band], _= scan_auc_threshold(fc[band], sc[band], w_thresholds)\n\nfig = gen_auc_threshold_figure(aucs_no_snr, w_thresholds, labels=\"No SNR mask\")\ngen_auc_threshold_figure(aucs, w_thresholds, ax=np.array(fig.get_axes()), colors='orange', labels=\"SNR mask\")\n[axi.legend() for axi in fig.get_axes()[:-1]]\n\nfname = f'cg_auc-threshold_snr.png'\nfig.savefig(args.path + fname)\nprint_log(f'Figure save to {args.path+fname:s}.', start)\nwith open(args.path+f'cg_aucs.pkl', 'wb') as f:\n pickle.dump(aucs_no_snr, f)\n pickle.dump(aucs, f)\nprint_log(f'Figure save to {args.path:s}cg_aucs.pkl', start)","repo_name":"NeoNeuron/Monkey_ECoG","sub_path":"cg_auc_th_mi_snr.py","file_name":"cg_auc_th_mi_snr.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12244419858","text":"import pytest\nfrom BaseClass import BaseClassOne\n\n\n@pytest.mark.usefixtures(\"dataLoad\")\nclass TestExample2(BaseClassOne):\n def test_editProfile(self, dataLoad):\n log = self.getlogger()\n log.info(\"information printed\")\n log.info(dataLoad)\n log.info(dataLoad[2])\n\n\n# def test_crossBrowser(cross_browser):\n# print(cross_browser[1])","repo_name":"AshokGual/PythonSeleniumBasic","sub_path":"pytestDemo/test_FixtureDemoTwo.py","file_name":"test_FixtureDemoTwo.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"44936360909","text":"# Leo Projects \n# @Naviya2 🇱🇰\n\nfrom pyrogram.types.bots_and_keyboards import reply_keyboard_markup\nfrom LeoSongDownloaderBot.plugins import *\nfrom pyrogram import idle, filters\nfrom pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom LeoSongDownloaderBot import LeoSongDownloaderBot as app\nfrom LeoSongDownloaderBot import LOGGER\n\npm_start_text = \"\"\"\nHello [{}](tg://user?id={}) 👋\n\nI'm Leo Song Downloader Bot 🇱🇰\n\nYou can download any song within a shortime with this Bot 🙂\n\nIf you want to know how to use this bot just\ntouch on this command \" /help \" 🙂\n\nLeo Projects 🇱🇰\n\"\"\"\n\nhelp_text = \"\"\"\nYou 
should know the following commands to use this bot 🙂\n\n⭕️ /song : Download songs from all sources 😏\n\n⭕️ Send youtube url to me directly i can download it to your telegram database in audio format 🙂\n\n\nMade By : @cokepokess\nSupport Group : @Adam_hakli\nUpdates Channel : @cokepokess\n\"\"\"\n\n@app.on_message(filters.command(\"start\"))\nasync def start(client, message):\n chat_id = message.chat.id\n user_id = message.from_user[\"id\"]\n name = message.from_user[\"first_name\"]\n if message.chat.type == \"private\":\n btn = InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n text=\"Updates Channel🗣\", url=\"https://t.me/new_ehi\"\n ),\n InlineKeyboardButton(\n text=\"Support Group👥\", url=\"https://t.me/leosupportx\"\n ),\n ],\n \n [\n InlineKeyboardButton(\n text=\"Developer🧑‍💻\", url=\"https://t.me/naviya2\"\n ),\n InlineKeyboardButton(\n text=\"Rate us ★\", url=\"https://t.me/tlgrmcbot?start=leosongdownloaderbot-review\"\n ), \n ],\n \n [\n InlineKeyboardButton(\n text=\"➕ Add me to your group ➕\", url=\"t.me/leosongdownloaderbot?startgroup=true\"\n ),\n ],\n ],\n )\n else:\n btn = None\n await message.reply(pm_start_text.format(name, user_id), reply_markup=btn)\n\n@app.on_message(filters.command(\"help\"))\nasync def start(client, message):\n await message.reply(help_text)\n\napp.start()\nLOGGER.info(\"LeoSongDownloaderBot is online.\")\nidle()\n","repo_name":"cokepokess/ytsongbot","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"8376727017","text":"def respond(sender_id, message_text, attachment_type, attachment_url, postback, quick_reply, context):\n from bot.lib.project import update_project\n \"\"\"Takes in ``sender_id``, ``message_text``= #keywords,``context``= project id\n updates project and sends a reponse.\n\n :param str sender_id: The unique id created by facebook and the current facebook's sender's ID\n :param str message_text: Any text written by the send in the chat interface\n :param dict context: attributes sent between conversations\n :param str attachment_type: dentifies attachment type i.e photo (optional, defaults to None)\n :param str attachment_url: The location of the attachment (optional, defaults to None)\n :param str postback: a reponse sent from the user clicking a button (optional, defaults to None)\n :param str quick_reply: an automatic reply (optional, defaults to None)\n\n :returns: ``reponse``a dict with the next message to move the conversation\n ``new_context`` empty dict, and ``coverstation`` dict containing\n the next stage and task for the the bot\n \"\"\"\n\n tags = [i.strip() for i in message_text.split('#')]\n update_project(sender_id=sender_id, project_id=context['project_id'], tags=tags)\n new_context = dict()\n\n response = dict(message_text=\"You're all done! 
\n\ndef validate(sender_id, message_text, attachment_type, postback, quick_reply):\n    \"\"\"Boolean check: takes in ``message_text`` = text\n    and determines if the message is valid.\n\n    :param str sender_id: The unique id created by facebook and the current facebook sender's ID\n    :param str message_text: Any text written by the sender in the chat interface\n    :param str attachment_type: Identifies attachment type i.e. photo (optional, defaults to None)\n    :param str postback: a response sent from the user clicking a button (optional, defaults to None)\n    :param str quick_reply: an automatic reply (optional, defaults to None)\n\n    :returns: Boolean and a dict with message text if the message is not valid\n    \"\"\"\n\n    if message_text:\n        return True, dict()\n    else:\n        return False, dict(message_text='Want to add some tags?')\n","repo_name":"acraftyhobbit/newbot","sub_path":"app/bot/conversations/create_project/add_tags.py","file_name":"add_tags.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} {"seq_id":"39188113443","text":"from __future__ import unicode_literals\nimport atexit\nimport desktop.log\nimport gunicorn.app.base\nimport logging\nimport logging.config\nimport multiprocessing\nimport os\nimport pkg_resources\nimport ssl\nimport sys\nimport tempfile\n\nfrom OpenSSL import crypto\nfrom multiprocessing.util import _exit_function\nfrom desktop import conf\nfrom desktop.lib.paths import get_desktop_root\nfrom django.core.management.base import BaseCommand\nfrom django.core.wsgi import get_wsgi_application\nfrom django.utils.translation import gettext as _\nfrom django.db import connection\nfrom gunicorn import util\nfrom six import iteritems\n\nGUNICORN_SERVER_HELP = r\"\"\"\n  Run Hue using the Gunicorn WSGI server in asynchronous mode.\n\"\"\"\n\nPID_FILE = None\n\nclass Command(BaseCommand):\n    help = _(\"Gunicorn Web server for Hue.\")\n\n    def add_arguments(self, parser):\n        parser.add_argument('--bind', help=_(\"Bind Address\"), action='store', default=None)\n\n    def handle(self, *args, **options):\n        start_server(args, options)\n\n    def usage(self, subcommand):\n        return GUNICORN_SERVER_HELP\n\ndef activate_translation():\n    from django.conf import settings\n    from django.utils import translation\n\n    # Activate the current language, because it won't get activated later.\n    try:\n        translation.activate(settings.LANGUAGE_CODE)\n    except AttributeError:\n        pass\n\ndef number_of_workers():\n    return (multiprocessing.cpu_count() * 2) + 1\n\ndef handler_app(environ, start_response):\n    os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"desktop.settings\")\n    return get_wsgi_application()\n\ndef post_fork(server, worker):\n    global PID_FILE\n    with open(PID_FILE, \"a\") as f:\n        f.write(\"%s\\n\" % worker.pid)\n\ndef post_worker_init(worker):\n    connection.connect()\n\ndef worker_int(worker):\n    connection.close()\n\ndef enable_logging(args, options):\n    HUE_DESKTOP_VERSION = pkg_resources.get_distribution(\"desktop\").version or \"Unknown\"\n    # Start basic logging as soon as possible.\n    if \"HUE_PROCESS_NAME\" not in os.environ:\n        _proc = os.path.basename(len(sys.argv) > 1 and sys.argv[1] or sys.argv[0])\n        os.environ[\"HUE_PROCESS_NAME\"] = _proc\n\n    desktop.log.basic_logging(os.environ[\"HUE_PROCESS_NAME\"])\n    logging.info(\"Welcome to Hue from Gunicorn server \" + HUE_DESKTOP_VERSION)\n
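\n# StandaloneApplication follows gunicorn's documented pattern for embedding\n# the server in code: subclass gunicorn.app.base.BaseApplication and\n# override load_config() and load().\n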
\nclass StandaloneApplication(gunicorn.app.base.BaseApplication):\n    def __init__(self, app, options=None):\n        self.options = options or {}\n        self.app_uri = 'desktop.wsgi:application'\n        super(StandaloneApplication, self).__init__()\n\n    def load_config(self):\n        config = dict([(key, value) for key, value in iteritems(self.options)\n                       if key in self.cfg.settings and value is not None])\n        for key, value in iteritems(config):\n            self.cfg.set(key.lower(), value)\n\n    def chdir(self):\n        # chdir to the configured path before loading,\n        # default is the current dir\n        os.chdir(self.cfg.chdir)\n\n        # add the path to sys.path\n        sys.path.insert(0, self.cfg.chdir)\n\n    def load_wsgiapp(self):\n        self.chdir()\n\n        # load the app\n        return util.import_app(self.app_uri)\n\n    def load(self):\n        return self.load_wsgiapp()\n\ndef argprocessing(args=[], options={}):\n    global PID_FILE\n    if options['bind']:\n        http_port = \"8888\"\n        bind_addr = options['bind']\n        if \":\" in bind_addr:\n            http_port = bind_addr.split(\":\")[1]\n        PID_FILE = \"/tmp/hue_%s.pid\" % (http_port)\n    else:\n        bind_addr = conf.HTTP_HOST.get() + \":\" + str(conf.HTTP_PORT.get())\n        PID_FILE = \"/tmp/hue_%s.pid\" % (conf.HTTP_PORT.get())\n    options['bind_addr'] = bind_addr\n\n    # Currently gunicorn does not support passphrase-protected SSL keyfiles\n    # https://github.com/benoitc/gunicorn/issues/2410\n    ssl_keyfile = None\n    worker_tmp_dir = os.environ.get(\"HUE_CONF_DIR\", get_desktop_root(\"conf\"))\n    if not worker_tmp_dir:\n        worker_tmp_dir = \"/tmp\"\n    options['worker_tmp_dir'] = worker_tmp_dir\n    if conf.SSL_CERTIFICATE.get() and conf.SSL_PRIVATE_KEY.get():\n        ssl_password = str.encode(conf.get_ssl_password()) if conf.get_ssl_password() is not None else None\n        if ssl_password:\n            # decrypt the key once into a temp file so gunicorn can read it\n            with open(conf.SSL_PRIVATE_KEY.get(), 'r') as f:\n                with tempfile.NamedTemporaryFile(dir=worker_tmp_dir, delete=False) as tf:\n                    tf.write(crypto.dump_privatekey(crypto.FILETYPE_PEM,\n                                                    crypto.load_privatekey(crypto.FILETYPE_PEM,\n                                                                           f.read(), ssl_password)))\n                    ssl_keyfile = tf.name\n        else:\n            ssl_keyfile = conf.SSL_PRIVATE_KEY.get()\n    options['ssl_keyfile'] = ssl_keyfile\n\ndef rungunicornserver(args=[], options={}):\n    gunicorn_options = {\n        'accesslog': \"-\",\n        'access_log_format': \"%({x-forwarded-for}i)s %(h)s %(l)s %(u)s %(t)s '%(r)s' %(s)s %(b)s '%(f)s' '%(a)s'\",\n        'backlog': 2048,\n        'bind': [options['bind_addr']],\n        'ca_certs': conf.SSL_CACERTS.get(),  # CA certificates file\n        'capture_output': True,\n        'cert_reqs': None,  # Whether client certificate is required (see stdlib ssl module)\n        'certfile': conf.SSL_CERTIFICATE.get(),  # SSL certificate file\n        'chdir': None,\n        'check_config': None,\n        'ciphers': conf.SSL_CIPHER_LIST.get(),  # Ciphers to use (see stdlib ssl module)\n        'config': None,\n        'daemon': None,\n        'do_handshake_on_connect': False,  # Whether to perform SSL handshake on socket connect.\n        'enable_stdio_inheritance': None,\n        'errorlog': \"-\",\n        'forwarded_allow_ips': None,\n        'graceful_timeout': 900,  # Timeout for graceful workers restart.\n        'group': conf.SERVER_GROUP.get(),\n        'initgroups': None,\n        'keepalive': 120,  # seconds to wait for requests on a keep-alive connection.\n        'keyfile': options['ssl_keyfile'],  # SSL key file\n        'limit_request_field_size': conf.LIMIT_REQUEST_FIELD_SIZE.get(),\n        'limit_request_fields': conf.LIMIT_REQUEST_FIELDS.get(),\n        'limit_request_line': conf.LIMIT_REQUEST_LINE.get(),\n        'loglevel': 'DEBUG' if conf.DJANGO_DEBUG_MODE.get() else 'INFO',\n        'max_requests': 1200,  # The maximum number of requests a worker will process before restarting.\n        'max_requests_jitter': 
0,\n        'paste': None,\n        'pidfile': None,\n        'preload_app': False,\n        'proc_name': \"hue\",\n        'proxy_allow_ips': None,\n        'proxy_protocol': None,\n        'pythonpath': None,\n        'raw_env': None,\n        'raw_paste_global_conf': None,\n        'reload': None,\n        'reload_engine': None,\n        'sendfile': True,\n        'spew': None,\n        'ssl_version': ssl.PROTOCOL_TLSv1_2,  # SSL version to use\n        'statsd_host': None,\n        'statsd_prefix': None,\n        'suppress_ragged_eofs': None,  # Suppress ragged EOFs (see stdlib ssl module)\n        'syslog': None,\n        'syslog_addr': None,\n        'syslog_facility': None,\n        'syslog_prefix': None,\n        'threads': conf.CHERRYPY_SERVER_THREADS.get(),\n        'timeout': 900,  # Workers silent for more than this many seconds are killed and restarted.\n        'umask': None,\n        'user': conf.SERVER_USER.get(),\n        'worker_class': conf.GUNICORN_WORKER_CLASS.get(),\n        'worker_connections': 1000,\n        'worker_tmp_dir': options['worker_tmp_dir'],\n        'workers': conf.GUNICORN_NUMBER_OF_WORKERS.get() if conf.GUNICORN_NUMBER_OF_WORKERS.get() is not None else 5,\n        'post_fork': post_fork,\n        'post_worker_init': post_worker_init,\n        'worker_int': worker_int\n    }\n    StandaloneApplication(handler_app, gunicorn_options).run()\n\ndef start_server(args, options):\n    global PID_FILE\n    argprocessing(args, options)\n\n    # Hide the Server software version in the response body\n    gunicorn.SERVER_SOFTWARE = \"apache\"\n    os.environ[\"SERVER_SOFTWARE\"] = gunicorn.SERVER_SOFTWARE\n\n    # Activate django translation\n    activate_translation()\n    enable_logging(args, options)\n    atexit.unregister(_exit_function)\n    with open(PID_FILE, \"a\") as f:\n        f.write(\"%s\\n\" % os.getpid())\n    rungunicornserver(args, options)\n\nif __name__ == '__main__':\n    start_server(args=sys.argv[1:], options={})\n","repo_name":"cloudera/hue","sub_path":"desktop/core/src/desktop/management/commands/rungunicornserver.py","file_name":"rungunicornserver.py","file_ext":"py","file_size_in_byte":8006,"program_lang":"python","lang":"en","doc_type":"code","stars":988,"dataset":"github-code","pt":"91"} {"seq_id":"40898106012","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport logging.config\nimport yaml\n\n__all__ = ['setup_logging']\n\n\ndef setup_logging(default_path='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):\n    \"\"\"\n    Setup logging configuration\n\n    Writes a log in the target type configured in the logging.yaml file (this component manages log levels,\n    calling components, the log target to the console or a log file and format).\n\n    If you want to know more about logging: https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial\n\n    :return: logging configuration\n    :rtype: module\n    \"\"\"\n\n    path = './resource/' + default_path\n    value = os.getenv(env_key, None)\n    if value:\n        path = value\n    if os.path.exists(path):\n        with open(path, 'rt') as f:\n            config = yaml.safe_load(f.read())\n            logging.config.dictConfig(config)\n    else:\n        logging.basicConfig(level=default_level)\n\n    return logging\n","repo_name":"AntoineMeheut/GoodDevPractices","sub_path":"source/modules/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} {"seq_id":"11480462464","text":"import mock\n\nfrom collections import namedtuple\nfrom uuid import uuid1 as uuid\nfrom datetime import datetime\nfrom seantis.reservation.tests import IntegrationTestCase\n\nfrom seantis.reservation.session import (\n    getUtility,\n    ILibresUtility\n)\n\nfrom seantis.reservation import Session\n
from libres.db.models import Allocation\n\n\ndef add_something(resource=None):\n    resource = resource or uuid()\n    allocation = Allocation(\n        raster=15, resource=resource, mirror_of=resource)\n    allocation.start = datetime(2011, 1, 1, 15)\n    allocation.end = datetime(2011, 1, 1, 15, 59)\n    allocation.group = uuid()\n\n    Session.add(allocation)\n\n\nclass TestSession(IntegrationTestCase):\n\n    @mock.patch('seantis.reservation.utils.get_config')\n    def test_dsnconfig(self, get_config):\n        util = getUtility(ILibresUtility)\n        util._default_dsn = 'test://default'\n\n        MockSite = namedtuple('MockSite', ['id'])\n\n        get_config.return_value = None\n        self.assertEqual(util.get_dsn(MockSite('test')), 'test://default')\n\n        get_config.return_value = 'test://specific'\n        self.assertEqual(util.get_dsn(MockSite('test2')), 'test://specific')\n\n        get_config.return_value = 'test://{*}'\n        self.assertEqual(util.get_dsn(MockSite('test3')), 'test://test3')\n\n        util._default_dsn = 'test://{*}'\n        get_config.return_value = None\n        self.assertEqual(util.get_dsn(MockSite('test4')), 'test://test4')\n","repo_name":"seantis/seantis.reservation","sub_path":"seantis/reservation/tests/test_session.py","file_name":"test_session.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"91"} {"seq_id":"23915194600","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Pandas Fundamentals\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# ## Pandas Data Structures: Series\n# \n# A Series represents a one-dimensional array of data. The main difference between a Series and a numpy array is that a Series has an _index_. The index contains the labels that we use to access the data. This can be useful.\n# \n# There are many ways to [create a Series](https://pandas.pydata.org/pandas-docs/stable/dsintro.html#series). We will just show a few.\n# \n# (Data are from the NASA [Planetary Fact Sheet](https://nssdc.gsfc.nasa.gov/planetary/factsheet/).)\n\n# In[2]:\n\n\nnames = ['Mercury', 'Venus', 'Earth']\nvalues = [0.3e24, 4.87e24, 5.97e24]\nmasses = pd.Series(values, index=names)\nmasses\n\n\n# Series have built-in plotting methods.\n\n# In[3]:\n\n\nmasses.plot(kind='bar')\n\n\n# Arithmetic operations and most numpy functions can be applied to Series.\n# An important point is that the Series keep their index during such operations.\n\n# In[4]:\n\n\nnp.log(masses) / masses**2\n\n\n# We can access the underlying index object if we need to:\n\n# In[5]:\n\n\nmasses.index\n\n\n# ### Indexing\n# \n# We can get values back out using the index via the `.loc` attribute\n\n# In[6]:\n\n\nmasses.loc['Earth']\n\n\n# Or by raw position using `.iloc`\n\n# In[7]:\n\n\nmasses.iloc[2]\n\n\n# We can pass a list or array to loc to get multiple rows back:\n\n# In[8]:\n\n\nmasses.loc[['Venus', 'Earth']]\n\n\n# And we can even use slice notation\n\n# In[9]:\n\n\nmasses.loc['Mercury':'Earth']\n\n\n# In[10]:\n\n\nmasses.iloc[:2]\n\n\n# If we need to, we can always get the raw data back out as well\n\n# In[11]:\n\n\nmasses.values  # a numpy array\n\n\n# In[12]:\n\n\nmasses.index  # a pandas Index object\n\n\n# ## Pandas Data Structures: DataFrame\n# \n# There is a lot more to Series, but they are limited to a single \"column\". A more useful Pandas data structure is the DataFrame. A DataFrame is basically a bunch of series that share the same index. It's a lot like a table in a spreadsheet.\n
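# \n# For instance, two Series that share an index already line up like the columns of a table. This extra cell is a quick added sketch using the `masses` Series from above:\n\n# In[ ]:\n\n\n# each dict key becomes a column; the shared index supplies the rows\npd.DataFrame({'mass': masses, 'log_mass': np.log(masses)})\n\n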
\n# Below we create a DataFrame.\n\n# In[13]:\n\n\n# first we create a dictionary\ndata = {'mass': [0.3e24, 4.87e24, 5.97e24],  # kg\n        'diameter': [4879e3, 12_104e3, 12_756e3],  # m\n        'rotation_period': [1407.6, np.nan, 23.9]  # h\n       }\ndf = pd.DataFrame(data, index=['Mercury', 'Venus', 'Earth'])\ndf\n\n\n# Pandas handles missing data very elegantly, keeping track of it through all calculations.\n\n# In[14]:\n\n\ndf.info()\n\n\n# A wide range of statistical functions is available on both Series and DataFrames.\n\n# In[15]:\n\n\ndf.min()\n\n\n# In[16]:\n\n\ndf.mean()\n\n\n# In[17]:\n\n\ndf.std()\n\n\n# In[18]:\n\n\ndf.describe()\n\n\n# We can get a single column as a Series using python's getitem syntax on the DataFrame object.\n\n# In[19]:\n\n\ndf['mass']\n\n\n# ...or using attribute syntax.\n\n# In[20]:\n\n\ndf.mass\n\n\n# Indexing works very similarly to Series\n\n# In[21]:\n\n\ndf.loc['Earth']\n\n\n# In[22]:\n\n\ndf.iloc[2]\n\n\n# But we can also specify the column we want to access\n\n# In[23]:\n\n\ndf.loc['Earth', 'mass']\n\n\n# In[24]:\n\n\ndf.iloc[:2, 0]\n\n\n# If we make a calculation using columns from the DataFrame, it will keep the same index:\n\n# In[25]:\n\n\nvolume = 4/3 * np.pi * (df.diameter/2)**3\ndf.mass / volume\n\n\n# Which we can easily add as another column to the DataFrame:\n\n# In[26]:\n\n\ndf['density'] = df.mass / volume\ndf\n\n\n# ## Merging Data\n# \n# Pandas supports a wide range of methods for merging different datasets. These are described extensively in the [documentation](https://pandas.pydata.org/pandas-docs/stable/merging.html). Here we just give a few examples.\n\n# In[27]:\n\n\ntemperature = pd.Series([167, 464, 15, -65],\n                        index=['Mercury', 'Venus', 'Earth', 'Mars'],\n                        name='temperature')\ntemperature\n\n\n# In[28]:\n\n\n# returns a new DataFrame\ndf.join(temperature)\n\n\n# In[29]:\n\n\n# returns a new DataFrame\ndf.join(temperature, how='right')\n\n\n# In[30]:\n\n\n# returns a new DataFrame\neveryone = df.reindex(['Mercury', 'Venus', 'Earth', 'Mars'])\neveryone\n\n\n# We can also index using a boolean series. This is very useful:\n\n# In[31]:\n\n\nadults = df[df.mass > 4e24]\nadults\n\n\n# In[32]:\n\n\ndf['is_big'] = df.mass > 4e24\ndf\n\n\n# ### Modifying Values\n# \n# We often want to modify values in a dataframe based on some rule. To modify values, we need to use `.loc` or `.iloc`\n\n# In[33]:\n\n\ndf.loc['Earth', 'mass'] = 5.98e24\ndf.loc['Venus', 'diameter'] += 1\ndf\n\n\n# ## Plotting\n# \n# DataFrames have all kinds of [useful plotting](https://pandas.pydata.org/pandas-docs/stable/visualization.html) built in.\n\n# In[34]:\n\n\ndf.plot(kind='scatter', x='mass', y='diameter', grid=True)\n\n\n# In[35]:\n\n\ndf.plot(kind='bar')\n\n\n# ## Time Indexes\n# \n# Indexes are very powerful. They are a big part of why Pandas is so useful. There are different indexes for different types of data. Time Indexes are especially great!\n
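# \n# For example, date strings parse straight into a DatetimeIndex:\n\n# In[ ]:\n\n\npd.to_datetime(['2015-03-01', '2015-03-02', '2015-03-03'])\n\n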
\n# In[36]:\n\n\ntwo_years = pd.date_range(start='2014-01-01', end='2016-01-01', freq='D')\ntimeseries = pd.Series(np.sin(2 * np.pi * two_years.dayofyear / 365),\n                       index=two_years)\ntimeseries.plot()\n\n\n# We can use python's slicing notation inside `.loc` to select a date range.\n\n# In[37]:\n\n\ntimeseries.loc['2015-01-01':'2015-07-01'].plot()\n\n\n# The DatetimeIndex object has lots of useful attributes\n\n# In[38]:\n\n\ntimeseries.index.month\n\n\n# In[39]:\n\n\ntimeseries.index.day\n\n\n# ## Reading Data Files: Weather Station Data\n# \n# In this example, we will use NOAA weather station data from https://www.ncdc.noaa.gov/data-access/land-based-station-data.\n# \n# The details of files we are going to read are described in this [README file](ftp://ftp.ncdc.noaa.gov/pub/data/uscrn/products/daily01/README.txt).\n\n# In[40]:\n\n\nimport pooch\nPOOCH = pooch.create(\n    path=pooch.os_cache(\"noaa-data\"),\n    base_url=\"doi:10.5281/zenodo.5564850/\",\n    registry={\n        \"data.txt\": \"md5:5129dcfd19300eb8d4d8d1673fcfbcb4\",\n    },\n)\ndatafile = POOCH.fetch(\"data.txt\")\ndatafile\n\n\n# In[41]:\n\n\nget_ipython().system(\" head '/home/jovyan/.cache/noaa-data/data.txt'\")\n\n\n# We now have a text file on our hard drive called `data.txt`. Examine it.\n# \n# To read it into pandas, we will use the [read_csv](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) function. This function is incredibly complex and powerful. You can use it to extract data from almost any text file. However, you need to understand how to use its various options.\n# \n# With no options, this is what we get.\n\n# In[42]:\n\n\ndf = pd.read_csv(datafile)\ndf.head()\n\n\n# Pandas failed to identify the different columns. This is because it was expecting a standard CSV (comma-separated values) file. In our file, instead, the values are separated by whitespace. And not by a single whitespace--the amount of whitespace between values varies. We can tell pandas this using the `sep` keyword.\n\n# In[43]:\n\n\ndf = pd.read_csv(datafile, sep='\\s+')\ndf.head()\n\n\n# Great! It worked. \n# \n# If we look closely, we will see there are lots of -99 and -9999 values in the file. The [README file](ftp://ftp.ncdc.noaa.gov/pub/data/uscrn/products/daily01/README.txt) tells us that these are values used to represent missing data. Let's tell this to pandas.\n\n# In[44]:\n\n\ndf = pd.read_csv(datafile, sep='\\s+', na_values=[-9999.0, -99.0])\ndf.head()\n\n\n# Great. The missing data is now represented by `NaN`.\n# \n# What data types did pandas infer?\n\n# In[45]:\n\n\ndf.info()\n\n\n# One problem here is that pandas did not recognize the `LST_DATE` column as a date. Let's help it.\n\n# In[46]:\n\n\ndf = pd.read_csv(datafile, sep='\\s+',\n                 na_values=[-9999.0, -99.0],\n                 parse_dates=[1])\ndf.info()\n\n\n# It worked!
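\n# \n# As a quick sanity check, the date column should now have a datetime64 dtype:\n\n# In[ ]:\n\n\ndf.LST_DATE.dtype\n\n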
\n# Finally, let's tell pandas to use the date column as the index.\n\n# In[47]:\n\n\ndf = df.set_index('LST_DATE')\ndf.head()\n\n\n# We can now access values by time:\n\n# In[48]:\n\n\ndf.loc['2017-08-07']\n\n\n# Or use slicing to get a range:\n\n# In[49]:\n\n\ndf.loc['2017-07-01':'2017-07-31']\n\n\n# ### Quick Statistics\n\n# In[50]:\n\n\ndf.describe()\n\n\n# ### Plotting Values\n# \n# We can now quickly make plots of the data\n\n# In[51]:\n\n\nfig, ax = plt.subplots(ncols=2, nrows=2, figsize=(14,14))\n\ndf.iloc[:, 4:8].boxplot(ax=ax[0,0])\ndf.iloc[:, 10:14].boxplot(ax=ax[0,1])\ndf.iloc[:, 14:17].boxplot(ax=ax[1,0])\ndf.iloc[:, 18:22].boxplot(ax=ax[1,1])\n\n\nax[1, 1].set_xticklabels(ax[1, 1].get_xticklabels(), rotation=90);\n\n\n# Pandas is very \"time aware\":\n\n# In[52]:\n\n\ndf.T_DAILY_MEAN.plot()\n\n\n# Note: we could also manually create an axis and plot into it.\n\n# In[53]:\n\n\nfig, ax = plt.subplots()\ndf.T_DAILY_MEAN.plot(ax=ax)\nax.set_title('Pandas Made This!')\n\n\n# In[54]:\n\n\ndf[['T_DAILY_MIN', 'T_DAILY_MEAN', 'T_DAILY_MAX']].plot()\n\n\n# ### Resampling\n# \n# Since pandas understands time, we can use it to do resampling.\n\n# In[55]:\n\n\n# monthly resampler object\nrs_obj = df.resample('MS')\nrs_obj\n\n\n# In[56]:\n\n\nrs_obj.mean()\n\n\n# We can chain all of that together\n\n# In[57]:\n\n\ndf_mm = df.resample('MS').mean()\ndf_mm[['T_DAILY_MIN', 'T_DAILY_MEAN', 'T_DAILY_MAX']].plot()\n\n\n# Next time we will dig deeper into resampling, rolling means, and grouping operations (groupby).\n\n# In[ ]:\n\n\n\n\n","repo_name":"eoda-macs401/earth_ocean_data_analysis","sub_path":"_build/jupyter_execute/Notebooks/basic_pandas.py","file_name":"basic_pandas.py","file_ext":"py","file_size_in_byte":9051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} {"seq_id":"4260362669","text":"import unittest\n\nfrom iconservice.base.address import ZERO_SCORE_ADDRESS, GOVERNANCE_SCORE_ADDRESS\nfrom iconservice.icon_constant import ConfigKey\nfrom tests.integrate_test.test_integrate_base import TestIntegrateBase\n\n\nclass TestIntegrateDeployAuditDeployOwner(TestIntegrateBase):\n\n    def _make_init_config(self) -> dict:\n        return {ConfigKey.SERVICE: {ConfigKey.SERVICE_AUDIT: True}}\n\n    def test_score(self):\n        tx1 = self._make_deploy_tx(\"test_audit_deploy_owner\",\n                                   \"test_score\",\n                                   self._addr_array[0],\n                                   ZERO_SCORE_ADDRESS)\n\n        prev_block, tx_results = self._make_and_req_block([tx1])\n\n        self._write_precommit_state(prev_block)\n\n        self.assertEqual(tx_results[0].status, int(True))\n        score_addr1 = tx_results[0].score_address\n        tx_hash1 = tx_results[0].tx_hash\n\n        tx2 = self._make_score_call_tx(self._admin,\n                                       GOVERNANCE_SCORE_ADDRESS,\n                                       'acceptScore',\n                                       {\"txHash\": f'0x{bytes.hex(tx_hash1)}'})\n\n        prev_block, tx_results = self._make_and_req_block([tx2])\n\n        self._write_precommit_state(prev_block)\n\n        self.assertEqual(tx_results[0].status, int(True))\n\n        tx3 = self._make_deploy_tx(\"test_audit_deploy_owner\",\n                                   \"test_link_score\",\n                                   self._addr_array[1],\n                                   ZERO_SCORE_ADDRESS,\n                                   deploy_params={\"score_addr\": str(score_addr1)})\n\n        prev_block, tx_results = self._make_and_req_block([tx3])\n\n        self._write_precommit_state(prev_block)\n\n        self.assertEqual(tx_results[0].status, int(True))\n        score_addr2 = tx_results[0].score_address\n        tx_hash2 = tx_results[0].tx_hash\n\n        tx4 = self._make_score_call_tx(self._admin,\n                                       GOVERNANCE_SCORE_ADDRESS,\n                                       'acceptScore',\n                                       {\"txHash\": f'0x{bytes.hex(tx_hash2)}'})\n\n        prev_block, tx_results = 
self._make_and_req_block([tx4])\n\n self._write_precommit_state(prev_block)\n\n self.assertEqual(tx_results[0].status, int(True))\n\n event_logs: list = tx_results[0].event_logs\n before_install, hello, after_install, _ = event_logs\n before_install = before_install.indexed\n hello = hello.indexed\n after_install = after_install.indexed\n self.assertEqual(before_install[1], self._addr_array[1])\n self.assertEqual(hello[1], score_addr2)\n self.assertEqual(after_install[1], self._addr_array[1])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"sojinkim-icon/icon-service","sub_path":"tests/integrate_test/test_integrate_deploy_audit_deploy_owner.py","file_name":"test_integrate_deploy_audit_deploy_owner.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"71154794222","text":"# pyright: reportGeneralTypeIssues=true, reportMissingModuleSource=false\nfrom __future__ import annotations\nimport gdb\nimport sys\nfrom typing import Optional, List\n\nsys.path.insert(0, \"/root/scripts\")\n\nfrom lkd.context_bp import GenericHeapSprayBP, GenericContextBP\nfrom lkd.session import GenericSession\nfrom lkd.structs import Slab, KmemCache\nfrom lkd.utils import current_cpu\n\n\nclass Session(GenericSession):\n # select which variant of the exploit we are debugging\n RW_VARIANT: bool = False\n # Exploit Parameters: KEEP IN SYNC WITH config.h\n ROP_PAYLOAD_KEY_BYTES: int = (0x400 // 2) - 0x10\n MAX_KEYS: int = 200\n MAX_KEY_BYTES: int = 20000\n # offset of misaligned free\n KM1k_OFFSET: int = 0x0 if RW_VARIANT else 0x8\n\n # slots per slab\n N_SLOTS_KM32: int = 0x80\n N_SLOTS_KM4k: int = 0x8\n N_SLOTS_KM1k: int = 0x10\n\n # defragmentation parameters\n N_DEFRAGMENT_POLL_THREADS: int = 0x4 * N_SLOTS_KM4k\n N_DEFRAGMENT_KM1k: int = 0x8 * N_SLOTS_KM1k\n N_DEFRAGMENT_KM32 = 0x8 * N_SLOTS_KM32\n\n # spray parameters\n CHUNK_REPLACE_TTY: int = 0x8\n CHUNK_FACTOR_PIPE: int = 3\n N_SLOW_POLL_THREADS: int = N_SLOTS_KM4k - 1\n N_2ndSTAGE_POLL_THREADS: int = N_SLOTS_KM32\n N_SPRAY_TTY: int = N_SLOTS_KM32 // 2\n N_SPRAY_PIPE: int = CHUNK_FACTOR_PIPE * N_SPRAY_TTY\n N_SPRAY_SEQ_OPS: int = N_SLOTS_KM32\n N_KEYS: int = N_SLOTS_KM32 - N_SLOW_POLL_THREADS\n N_KEYS_2: int = MAX_KEYS - 0x8\n # stop a bit earlier in case this last spray runs into some\n # limit\n N_KEYS_3: int = (MAX_KEY_BYTES // ROP_PAYLOAD_KEY_BYTES) - 5\n N_RW_PTMX: int = 0x2 * N_SLOTS_KM1k\n\n # BPs\n # heap spray monitoring\n key_bp: KeyAllocationsBP | None = None\n poll_bp: PollListHeapAllocationsBP | None = None\n seq_ops_bp: SeqOpsBP | None = None\n tty_bp: TtyHeapSprayBP | None = None\n pipe_bp: PipeHeapSprayBP | None = None\n tty_write_buf_bp: TtyWriteBufHeapSprayBP | None = None\n # memory corruption\n write_bp: gdb.Breakpoint | None = None\n # rop\n rop_start_bp: gdb.Breakpoint | None = None\n\n # general purpose caches we use\n km4k: KmemCache | None = None\n km32: KmemCache | None = None\n km1k: KmemCache | None = None\n\n # arbitrarily freed slots\n uaf_km32_slot: Optional[int] = None\n # possibly misaligned, sub KM1k_OFFSET to get aligned slot\n uaf_km1k_slot: Optional[int] = None\n\n # slabs that contain the arbitrarily freed slots\n km32_slab: Slab | None = None\n km1k_slab: Slab | None = None\n\n # break on any access to freelist pointer offset of \"interesting\"\n # slots\n uaf_km32_access_bp: UafKm32AccessBP | None = None\n uaf_km1k_access_bp: UafKm1kAccessBP | None = None\n uaf_km1k_aligned_access_bp: UafKm1kAlignedAccessBP | None = 
None\n fake_pipe_buffer_access_bp: FakePipeBufferAccessBP | None = None\n\n\nsession: Session = Session()\n\n\nclass UafKm32AccessBP(gdb.Breakpoint):\n def stop(self) -> bool:\n assert (\n session.poll_bp\n and session.key_bp\n and session.seq_ops_bp\n and session.km32\n and session.km32_slab\n )\n print(\"-\" * 80)\n\n session.km32.print_info()\n session.km32_slab.print_info()\n\n print(\"Status of sprays:\")\n print(\n f\"Keys: n={len(session.key_bp.allocations)} s={session.key_bp.stage}\"\n )\n print(\n f\"Poll: n={len(session.poll_bp.km32_allocations)} s={session.poll_bp.stage}\"\n )\n print(\n f\"SeqOps: n={len(session.seq_ops_bp.allocations)} s={session.seq_ops_bp.stage}\"\n )\n print(\"\")\n\n print(\"A little bit of km32 heap context after UAF slot:\")\n print(\n gdb.execute(\n f\"x/40gx {session.uaf_km32_slot}\", to_string=True\n )\n )\n\n print(\"Access Backtrace:\")\n print(gdb.execute(f\"bt 10\", to_string=True))\n\n print(\"=\" * 80)\n\n return False\n\n\nclass UafKm1kAccessBP(gdb.Breakpoint):\n def stop(self) -> bool:\n assert (\n session.poll_bp\n and session.key_bp\n and session.seq_ops_bp\n and session.km1k_slab\n and session.km1k\n )\n print(\"-\" * 80)\n print(\"Access to MISALIGNED km1k slot's next pointer\")\n\n session.km1k.print_info()\n session.km1k_slab.print_info()\n\n print(\"Status of sprays:\")\n print(\n f\"Keys: n={len(session.key_bp.allocations)} s={session.key_bp.stage}\"\n )\n print(\n f\"Poll: n={len(session.poll_bp.km32_allocations)} s={session.poll_bp.stage}\"\n )\n print(\n f\"SeqOps: n={len(session.seq_ops_bp.allocations)} s={session.seq_ops_bp.stage}\"\n )\n print(\"\")\n\n print(\"A little bit of km1k heap context after UAF slot:\")\n print(\n gdb.execute(\n f\"x/40gx {session.uaf_km1k_slot}\", to_string=True\n )\n )\n\n print(\"Access Backtrace:\")\n print(gdb.execute(f\"bt 10\", to_string=True))\n print(\"=\" * 80)\n\n return False\n\n\nclass UafKm1kAlignedAccessBP(gdb.Breakpoint):\n def stop(self) -> bool:\n assert (\n session.poll_bp\n and session.key_bp\n and session.seq_ops_bp\n and session.km1k_slab\n and session.km1k\n )\n print(\"-\" * 80)\n print(\"Access to ALIGNED km1k slot's next pointer\")\n\n session.km1k.print_info()\n session.km1k_slab.print_info()\n\n print(\"Status of sprays:\")\n print(\n f\"Keys: n={len(session.key_bp.allocations)} s={session.key_bp.stage}\"\n )\n print(\n f\"Poll: n={len(session.poll_bp.km32_allocations)} s={session.poll_bp.stage}\"\n )\n print(\n f\"SeqOps: n={len(session.seq_ops_bp.allocations)} s={session.seq_ops_bp.stage}\"\n )\n print(\"\")\n\n print(\"A little bit of km1k heap context after UAF slot:\")\n print(\n gdb.execute(\n f\"x/40gx {session.uaf_km1k_slot}\", to_string=True\n )\n )\n print(\"Access Backtrace:\")\n print(gdb.execute(f\"bt 10\", to_string=True))\n print(\"=\" * 80)\n\n return False\n\n\nclass FakePipeBufferAccessBP(gdb.Breakpoint):\n def stop(self) -> bool:\n assert (\n session.poll_bp\n and session.key_bp\n and session.seq_ops_bp\n and session.km1k_slab\n and session.km1k\n )\n\n print(\"-\" * 80)\n print(\"Access to `page` of first fake `pipe_buffer`\")\n\n session.km1k.print_info()\n session.km1k_slab.print_info()\n\n print(\"Status of sprays:\")\n print(\n f\"Keys: n={len(session.key_bp.allocations)} s={session.key_bp.stage}\"\n )\n print(\n f\"Poll: n={len(session.poll_bp.km32_allocations)} s={session.poll_bp.stage}\"\n )\n print(\n f\"SeqOps: n={len(session.seq_ops_bp.allocations)} s={session.seq_ops_bp.stage}\"\n )\n print(\"\")\n\n print(\"A little bit of km1k heap context 
after UAF slot:\")\n        print(\n            gdb.execute(\n                f\"x/40gx {session.uaf_km1k_slot}\", to_string=True\n            )\n        )\n        print(\"Access Backtrace:\")\n        print(gdb.execute(f\"bt 10\", to_string=True))\n        print(\"=\" * 80)\n\n        return False\n\n\nclass TtyWriteBufHeapSprayBP(GenericHeapSprayBP):\n    allocations: List[GenericHeapSprayBP.Allocation] = []\n\n    # stages: 0 = reclaim arbitrarily freed pipe_buffer array\n\n    def _stop(self) -> bool:\n        address_of_allocation: int = int(\n            gdb.parse_and_eval(\"buf_chunk\")\n        )\n        size_of_allocation: int = int(gdb.parse_and_eval(\"chunk\"))\n        cpu: str = f\"CPU#{current_cpu()}\"\n\n        self.allocations.append(\n            self.Allocation(\n                size_of_allocation, address_of_allocation, cpu\n            )\n        )\n\n        session.slot_histories[address_of_allocation].append(\n            f\"wb{self.stage}\"\n        )\n\n        if (\n            self.stage == 0\n            and len(self.allocations) == session.N_RW_PTMX\n        ):\n            self.stage += 1\n            self.show_and_clear_allocations()\n\n            # avoid being notified of every access that happens\n            # during arbitrary r/w phase, also kill all other BPs for\n            # performance\n            print(\"Disable all Breakpoints\")\n            gdb.execute(\"disable\")\n            print(gdb.execute(\"info breakpoints\", to_string=True))\n\n            \"\"\"\n            # break when we overwrite the fs\n            current_fs: int = int(\n                gdb.parse_and_eval(\"&$lx_current().fs\")\n            )\n            print(f\"Break on accesses to &current->fs@{current_fs}\")\n            gdb.Breakpoint(\n                f\"*(unsigned long*){current_fs}\",\n                gdb.BP_WATCHPOINT,\n                gdb.WP_ACCESS,\n            )\n            current_cred_uid: int = int(\n                gdb.parse_and_eval(\"&$lx_current().cred.uid\")\n            )\n            print(\n                \"Break on accesses to &current->cred->uid@\"\n                f\"{current_cred_uid}\"\n            )\n            gdb.Breakpoint(\n                f\"*(unsigned long*){current_cred_uid}\",\n                gdb.BP_WATCHPOINT,\n                gdb.WP_ACCESS,\n            )\n            print(\"Break when setting process comm\")\n            gdb.Breakpoint(\"kernel/sys.c:2335\")\n            \"\"\"\n\n            return True\n\n        return False\n\n    def show_allocations(self) -> None:\n        self._print_header(\"tty->write_buf allocations\")\n        self._show_slub_state(session.km1k, session.km1k_slab)\n        self._show_allocations(\n            self.allocations,\n            session,\n            # we want the aligned slot here as pipe buffers replace\n            # ttys and are thus aligned\n            session.uaf_km1k_slot - session.KM1k_OFFSET\n            if session.uaf_km1k_slot\n            else None,\n        )\n        self._print_footer()\n\n    def clear_allocations(self) -> None:\n        self.allocations.clear()\n\n\nclass PipeHeapSprayBP(GenericHeapSprayBP):\n    pipe_buffer_allocations: List[GenericHeapSprayBP.Allocation] = []\n\n    # stages: 0 = Defragmentation ; 1 = Replace tty with pipe buffers\n\n    def _stop(self) -> bool:\n        pipe_buffer: int = int(gdb.parse_and_eval(\"$rax\")) + 2**64\n\n        cpu: str = f\"CPU#{current_cpu()}\"\n\n        self.pipe_buffer_allocations.append(\n            self.Allocation(0x400, pipe_buffer, cpu)\n        )\n\n        session.slot_histories[pipe_buffer].append(f\"pp{self.stage}\")\n\n        if (\n            self.stage == 0\n            and len(self.pipe_buffer_allocations)\n            == session.N_DEFRAGMENT_KM1k\n        ):\n            self.stage += 1\n            self.show_and_clear_allocations()\n            return False\n        elif (\n            self.stage == 1\n            and len(self.pipe_buffer_allocations)\n            == session.N_SPRAY_PIPE\n        ):\n            self.stage += 1\n            self.show_and_clear_allocations()\n            return True\n\n        return False\n\n    def show_allocations(self) -> None:\n        self._print_header(\"pipe_buffer allocations\")\n        self._show_slub_state(session.km1k, session.km1k_slab)\n        self._show_allocations(\n            self.pipe_buffer_allocations,\n            session,\n            # we want the aligned slot here as pipe buffers replace\n            # ttys and are thus aligned\n            session.uaf_km1k_slot - session.KM1k_OFFSET\n            if session.uaf_km1k_slot\n            else None,\n        )\n        
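# session.uaf_km1k_slot holds the misaligned address, so the aligned\n        # slot that a reclaiming tty_struct/pipe_buffer occupies is\n        # uaf_km1k_slot - KM1k_OFFSET (see the session comments above).\n        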
self._print_footer()\n\n def clear_allocations(self) -> None:\n self.pipe_buffer_allocations.clear()\n\n\nclass TtyHeapSprayBP(GenericHeapSprayBP):\n tty_file_private_allocations: List[\n GenericHeapSprayBP.Allocation\n ] = []\n tty_allocations: List[GenericHeapSprayBP.Allocation] = []\n\n def _stop(self) -> bool:\n p_tty_file_private: int = (\n int(gdb.parse_and_eval(\"$r13\")) + 2**64\n )\n p_tty: int = int(gdb.parse_and_eval(\"$rdi\")) + 2**64\n\n if not session.km1k:\n session.km1k = KmemCache.from_virtual(p_tty)\n\n cpu: str = self.current_cpu()\n\n self.tty_file_private_allocations.append(\n self.Allocation(0x20, p_tty_file_private, cpu)\n )\n self.tty_allocations.append(self.Allocation(0x400, p_tty, cpu))\n\n session.slot_histories[p_tty_file_private].append(\n f\"tt{self.stage}\"\n )\n session.slot_histories[p_tty].append(f\"tt{self.stage}\")\n\n if len(self.tty_allocations) == session.N_SPRAY_TTY:\n self.stage += 1\n self.show_and_clear_allocations()\n return True\n\n return False\n\n def show_allocations(self) -> None:\n self._print_header(\"tty_struct allocations\")\n self._show_slub_state(session.km1k, session.km1k_slab)\n self._show_allocations(self.tty_allocations, session)\n\n self._print_header(\"tty_file_private allocations\")\n self._show_slub_state(session.km32, session.km32_slab)\n self._show_allocations(\n self.tty_file_private_allocations, session\n )\n self._print_footer()\n\n def clear_allocations(self) -> None:\n self.tty_allocations.clear()\n self.tty_file_private_allocations.clear()\n\n\nclass SeqOpsBP(GenericHeapSprayBP):\n \"\"\"Monitors heap-allocated seq_operations objects (in context of\n exploit process)\"\"\"\n\n # allocations in this spray\n allocations: List[GenericHeapSprayBP.Allocation] = []\n # stages: 0: defragment ; 1: reclaim freed key\n\n def _stop(self) -> bool:\n address_of_allocation: int = (\n int(gdb.parse_and_eval(\"$rax\")) + 2**64\n )\n size_of_allocation: int = 32\n\n cpu: str = f\"CPU#{current_cpu()}\"\n\n self.allocations.append(\n self.Allocation(\n size_of_allocation, address_of_allocation, cpu\n )\n )\n\n session.slot_histories[address_of_allocation].append(\n f\"so{self.stage}\"\n )\n\n if (\n self.stage == 0\n and len(self.allocations) == session.N_DEFRAGMENT_KM32\n ):\n self.show_and_clear_allocations()\n self.stage += 1\n return False\n elif (\n self.stage == 1\n and len(self.allocations) == session.N_SPRAY_SEQ_OPS\n ):\n self.stage += 1\n self.show_and_clear_allocations()\n\n return True\n\n return False\n\n def clear_allocations(self) -> None:\n self.allocations.clear()\n\n def show_allocations(self) -> None:\n self._print_header(f\"seq_operations allocations ({self.stage})\")\n self._show_allocations(\n self.allocations,\n session,\n session.uaf_km32_slot,\n )\n self._print_footer()\n\n\nclass KeyAllocationsBP(GenericHeapSprayBP):\n \"\"\"Monitors heap-allocated user_key_payload objects (in context of\n exploit process)\"\"\"\n\n # allocations in this spray\n allocations: List[GenericHeapSprayBP.Allocation] = []\n\n # stages: 0 = null next ; 1 = pipe_buf next ; 2 = ROP payload\n\n def _stop(self) -> bool:\n address_of_allocation: int = int(gdb.parse_and_eval(\"upayload\"))\n size_of_allocation: int = int(\n gdb.parse_and_eval(\"sizeof(*upayload) + datalen\")\n )\n\n cpu: str = f\"CPU#{current_cpu()}\"\n\n self.allocations.append(\n self.Allocation(\n size_of_allocation, address_of_allocation, cpu\n )\n )\n\n session.slot_histories[address_of_allocation].append(\n f\"k{self.stage}\"\n )\n\n # pause on finished heap sprays\n if 
self.stage == 0 and len(self.allocations) == session.N_KEYS:\n self.show_and_clear_allocations()\n self.stage += 1\n elif (\n self.stage == 1\n and len(self.allocations) == session.N_KEYS_2\n ):\n session.uaf_km1k_slot = int(\n gdb.parse_and_eval(\n f\"*(unsigned long*){self.allocations[0x10].address}\"\n )\n )\n session.km1k_slab = Slab.from_virtual(\n session.uaf_km1k_slot - session.KM1k_OFFSET,\n )\n print(\n f\"Km1k UAF slot (misaligned): {hex(session.uaf_km1k_slot)}\"\n )\n if session.RW_VARIANT:\n session.fake_pipe_buffer_access_bp = (\n FakePipeBufferAccessBP(\n f\"*(unsigned long*){session.uaf_km1k_slot}\",\n gdb.BP_WATCHPOINT,\n gdb.WP_ACCESS,\n )\n )\n else:\n # break on accesses to UAF slot in km1k\n # also show some heap context\n # the misaligned slot's freepointer is here\n session.uaf_km1k_access_bp = UafKm1kAccessBP(\n f\"*(unsigned long*){session.uaf_km1k_slot+512}\",\n gdb.BP_WATCHPOINT,\n gdb.WP_ACCESS,\n )\n # the aligned slot's freepointer is here\n session.uaf_km1k_aligned_access_bp = UafKm1kAlignedAccessBP(\n f\"*(unsigned long*){session.uaf_km1k_slot + 512 - session.KM1k_OFFSET}\",\n gdb.BP_WATCHPOINT,\n gdb.WP_ACCESS,\n )\n # also break when we are about to take over RIP\n print(\n f\"*free_pipe_info+61 if $rcx=={session.uaf_km1k_slot - session.KM1k_OFFSET}\"\n )\n session.rop_start_bp = gdb.Breakpoint(\n f\"*free_pipe_info+61 if $rcx=={session.uaf_km1k_slot - session.KM1k_OFFSET}\"\n )\n session.rop_start_bp.condition = (\n f\"$rcx=={session.uaf_km1k_slot - session.KM1k_OFFSET}\"\n )\n\n self.show_and_clear_allocations()\n self.stage += 1\n elif (\n self.stage == 2\n and len(self.allocations) == session.N_KEYS_3\n ):\n self.show_and_clear_allocations()\n self.stage += 1\n else:\n return False\n\n return True\n\n def show_allocations(self) -> None:\n self._print_header(\n f\"user_key_payload allocations in spray #{self.stage}\"\n )\n if self.stage == 2:\n self._show_slub_state(session.km1k, session.km1k_slab)\n self._show_allocations(\n self.allocations,\n session,\n session.uaf_km1k_slot, # we want the misaligned slot here\n )\n else:\n self._show_slub_state(session.km32, session.km32_slab)\n self._show_allocations(\n self.allocations,\n session,\n session.uaf_km32_slot,\n )\n self._print_footer()\n\n def clear_allocations(self) -> None:\n self.allocations.clear()\n\n\nclass PollListHeapAllocationsBP(GenericHeapSprayBP):\n \"\"\"Monitors heap-allocated poll_list objects (in context of\n exploit process)\"\"\"\n\n km4k_victim_allocations: List[GenericHeapSprayBP.Allocation] = []\n km4k_defragment_allocations: List[\n GenericHeapSprayBP.Allocation\n ] = []\n km32_allocations: List[GenericHeapSprayBP.Allocation] = []\n\n # stages: 0: defragment ; 1: 4k victims ; 2: 32 victims\n\n def _stop(self):\n nfds_in_allocation: int = int(gdb.parse_and_eval(\"len\"))\n address_of_allocation: int = int(gdb.parse_and_eval(\"walk\"))\n\n cpu: str = f\"CPU#{current_cpu()}\"\n\n if nfds_in_allocation == 510:\n # a km4k victim for the initial memory corruption\n if not session.km4k:\n session.km4k = KmemCache.from_virtual(\n address_of_allocation\n )\n self.km4k_victim_allocations.append(\n self.Allocation(0x1000, address_of_allocation, cpu)\n )\n elif nfds_in_allocation == 2:\n # a km32 list tail\n if not session.km32:\n session.km32 = KmemCache.from_virtual(\n address_of_allocation\n )\n self.km32_allocations.append(\n self.Allocation(0x20, address_of_allocation, cpu)\n )\n elif nfds_in_allocation == 508:\n self.km4k_defragment_allocations.append(\n self.Allocation(0x1000, 
address_of_allocation, cpu)\n )\n\n session.slot_histories[address_of_allocation].append(\n f\"pl{self.stage}\"\n )\n\n if (\n self.stage == 0\n and len(self.km4k_defragment_allocations)\n == session.N_DEFRAGMENT_POLL_THREADS\n ):\n self.stage += 1\n print(\"Prepare: km4k defragmentation done\")\n elif (\n self.stage == 1\n and len(self.km32_allocations)\n == session.N_SLOW_POLL_THREADS\n ):\n print(\"S1: km4k and km32 victim allocations done\")\n self.stage += 1\n elif (\n self.stage == 2\n and len(self.km32_allocations)\n == session.N_2ndSTAGE_POLL_THREADS\n ):\n print(\"S7: km32 victim poll_list allocations done\")\n self.stage += 1\n self.show_and_clear_allocations()\n return True\n\n return False\n\n def show_allocations(self) -> None:\n self._print_header(\"poll_list allocations\")\n self._show_slub_state(session.km4k)\n self._show_slub_state(session.km32)\n\n self._print_header(\"poll_list allocations: defragmenting\")\n self._show_allocations(\n self.km4k_defragment_allocations,\n session,\n )\n\n self._print_header(\"poll_list allocations: victims in km4k\")\n self._show_allocations(\n self.km4k_victim_allocations,\n session,\n )\n\n self._print_header(\"poll_list allocations: victims in km32\")\n self._show_allocations(\n self.km32_allocations,\n session,\n session.uaf_km32_slot,\n )\n\n self._print_footer()\n\n def clear_allocations(self) -> None:\n self.km4k_victim_allocations.clear()\n self.km4k_defragment_allocations.clear()\n self.km32_allocations.clear()\n\n\ndef set_gdb_settings() -> None:\n gdb.execute(\"set breakpoint pending on\")\n gdb.execute(\"set python print-stack full\")\n\n\ndef create_breakpoints_1() -> None:\n session.seq_ops_bp = SeqOpsBP(\"*single_open+41\", comm=\"sploit\")\n session.key_bp = KeyAllocationsBP(\n \"*user_preparse+52\", comm=\"sploit\"\n )\n session.poll_bp = PollListHeapAllocationsBP(\n \"*do_sys_poll+284\", comm=\"sploit\"\n )\n\n\ndef create_breakpoints_2() -> None:\n # break on accesses to UAF object+0x10 (pos. 
of freelist pointer),\n # show some heap context\n assert session.uaf_km32_slot\n session.uaf_km32_access_bp = UafKm32AccessBP(\n f\"*(unsigned long*){session.uaf_km32_slot+0x10}\",\n gdb.BP_WATCHPOINT,\n gdb.WP_ACCESS,\n )\n # [0: xattr zeroing]\n # [1: allocation of key]\n # 2: free during clean up of poll list (do_sys_poll)\n # 3: reclaim with seq_operations (in single_open)\n # 4: read key I (user_read)\n # 5: read key II (user_read)\n # 6: zeroing by kfree sensitive (rcu_do_batch)\n # 7: reclaim with poll_list (do_sys_poll)\n # 7.1: doing the polling (do_poll)\n # 8: free during close (single_release)\n # 9: xattr pointer init (setxattr)\n # 10: allocation of key (user_preparse)\n # 11: free during clean up of poll list (do_sys_poll)\n\n session.tty_bp = TtyHeapSprayBP(\"*tty_add_file+19\", comm=\"sploit\")\n session.pipe_bp = PipeHeapSprayBP(\"fs/pipe.c:806\", comm=\"sploit\")\n session.tty_write_buf_bp = TtyWriteBufHeapSprayBP(\n \"drivers/tty/tty_io.c:1008\", comm=\"sploit\"\n )\n\n\ndef inspect_initial_corruption() -> None:\n \"\"\"Debugging success or failure of the initial memory corruption\"\"\"\n\n session.write_bp = gdb.Breakpoint(\n \"cormon_proc_write\", temporary=True\n )\n session.write_bp.commands = (\n \"b *cormon_proc_write+87\\n\" + \"b *cormon_proc_write+163\\n\" + \"c\"\n )\n\n gdb.execute(\"c\")\n # cormon_proc_write+87 (allocation of filter buffer)\n reclaim_bytes = gdb.execute(f\"x/8gx $rax\", to_string=True)\n reclaim_object = str(\n gdb.parse_and_eval(f\"*(struct poll_list*)($rax)\")\n )\n\n gdb.execute(\"c\")\n # cormon_proc_write+163 (memory corruption)\n\n assert session.key_bp\n session.key_bp.show_allocations()\n\n assert session.poll_bp\n session.poll_bp.show_and_clear_allocations()\n\n # Filter reclaims in km4k\n print(\"\\nThis object was reclaimed by syscall filter:\")\n print(reclaim_bytes, end=\"\")\n print(reclaim_object)\n\n # Initial memory corruption in km4k\n print(\"\\nThis object in km4k will be corrupted:\")\n gdb.execute(f\"x/8gx ($rbx+$rbp)\")\n print(gdb.parse_and_eval(f\"*(struct poll_list*)($rbx+$rbp)\"))\n\n to_be_corrupted_pointer_value = gdb.parse_and_eval(\n \"*(unsigned long*)($rbx+$rbp)\"\n )\n\n print(\"\\nThis object in km32 will be arbitrarily freed:\")\n gdb.execute(f\"x/4gx {to_be_corrupted_pointer_value} & ~0xFF\")\n print(\n gdb.parse_and_eval(\n f\"*(struct user_key_payload*)({to_be_corrupted_pointer_value} & ~0xFF)\"\n )\n )\n\n print(\"\\nA little bit of km32 heap context around UAF slot:\")\n gdb.execute(f\"x/40gx {to_be_corrupted_pointer_value} & ~0xFF\")\n\n session.uaf_km32_slot = (\n int(to_be_corrupted_pointer_value) & 0xFFFFFFFFFFFFFF00\n )\n session.km32_slab = Slab.from_virtual(session.uaf_km32_slot)\n\n\ndef main():\n set_gdb_settings()\n\n create_breakpoints_1()\n\n inspect_initial_corruption()\n\n create_breakpoints_2()\n\n\nmain()\n","repo_name":"vobst/like-dbg-fork-public","sub_path":"io/scripts/gdb_script_corjail.py","file_name":"gdb_script_corjail.py","file_ext":"py","file_size_in_byte":25807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"38249112535","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport sys\nimport new\nimport json\nimport unittest\nfrom time import sleep\nimport argparse\nimport random\n\nfrom selenium import webdriver\nimport nose\nfrom nose.plugins.multiprocess import MultiProcess\n\nfrom lib.parse_version import parse_version\n\nUSER = os.environ['SAUCE_USER']\nKEY = os.environ['SAUCE_KEY']\nHOST = 
'ondemand.saucelabs.com'\nPORT = 80\nJAR_URL = \"https://sauce-bundles.s3.amazonaws.com/selenium/selenium-server-%s%s.jar\"\n\n\nclass FirefoxSupportTest(unittest.TestCase):\n    __test__ = False\n\n    def setUp(self):\n        parsed_version = parse_version(self.sel_version)\n        jar_addition = ''\n        if parsed_version <= (2, 19, 0):\n            jar_addition += '-newcert'\n        if (2, 14, 0) <= parsed_version <= (2, 25, 0):\n            jar_addition += '-dnsfix'\n        dc = {'platform': self.platform,\n              'browserName': self.br,\n              'version': self.version,\n              'selenium-version': JAR_URL % (self.sel_version, jar_addition),\n              'name': self.name,\n              'prevent-requeue': True,\n              }\n        self.native = False\n        for i in range(3):\n            try:\n                self.driver = webdriver.Remote(desired_capabilities=dc,\n                                               command_executor=\"http://%s:%s@%s:%s/wd/hub\"\n                                               % (USER, KEY, HOST, PORT))\n            except Exception:\n                self.driver = None\n            else:\n                self.native = self.driver.capabilities['nativeEvents']\n                break\n\n    def test_browser_works(self):\n        if not self.driver:\n            self.fail(\"Failed to launch browser\")\n        url = random.choice(['https://saucelabs.com/login',\n                             'https://google.com', 'https://www.bing.com',\n                             'https://yahoo.com', 'https://www.facebook.com',\n                             'https://instagram.com'])\n        self.driver.get(url)\n        for i in range(30):\n            if self.driver.title:\n                break\n            sleep(0.5)\n        else:\n            self.fail(\"title never showed\")\n\n    def tearDown(self):\n        if self.driver:\n            self.driver.quit()\n        with open(os.path.join(self.platform, '%s_%s_results.json' % (self.version,\n                                                                      self.sel_version)),\n                  'w') as results_file:\n            results = {self.version: {\n                self.sel_version: {\n                    'worked': sys.exc_info() == (None, None, None),\n                    'native': self.native}}}\n            results_file.write(json.dumps(results))\n\n\nparser = argparse.ArgumentParser(\n    description='Collect Firefox vs Selenium version support matrix')\nparser.add_argument('--firefox', '-f', metavar='FF_VER',\n                    nargs='*', help='Specific versions of Firefox to test')\nparser.add_argument('--selenium', '-s', metavar='SE_VER',\n                    nargs='*', help='Specific versions of Selenium to test')\nparser.add_argument('--platform', '-p', type=str, default=\"Windows 2003\",\n                    help='The OS to run the tests on.')\nparser.add_argument('--threads', '-t', type=int, default=10,\n                    help='Amount of threads to run tests in parallel on.')\nargs = parser.parse_args()\n\njars_to_test = args.selenium if args.selenium else [\n    '2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0', '2.6.0', '2.7.0',\n    '2.8.0', '2.9.0', '2.10.0', '2.11.0', '2.12.0', '2.13.0', '2.14.0',\n    '2.14.1', '2.15.0', '2.16.0', '2.16.1', '2.17.0', '2.18.0', '2.19.0',\n    '2.20.0', '2.21.0', '2.22.0', '2.23.0', '2.23.1', '2.24.0', '2.24.1',\n    '2.25.0', '2.26.0', '2.27.0', '2.28.0', '2.29.0', '2.30.0', '2.31.0',\n    '2.32.0', '2.33.0', '2.34.0', '2.35.0', '2.36.0', '2.37.0', '2.38.0',\n    '2.39.0', '2.40.0', '2.41.0', '2.42.2', '2.43.1', '2.44.0', '2.45.0',\n]\nfirefoxes_to_test = args.firefox if args.firefox else range(3, 38)\nclasses = {}\nfor jar_version in jars_to_test:\n    for ff_version in firefoxes_to_test:\n        name = \"%s_%s_%s\" % (FirefoxSupportTest.__name__, jar_version, ff_version)\n        name = name.encode('ascii')\n        if name.endswith(\".\"):\n            name = name[:-1]\n        for x in \". \":\n            name = name.replace(x, \"\")\n\n        d = dict(FirefoxSupportTest.__dict__)\n        d.update({'__test__': True,\n                  '__name__': name,\n                  'name': name,\n                  'platform': args.platform,\n                  'br': 'firefox',\n                  'version': ff_version,\n                  'sel_version': jar_version,\n                  })\n\n        classes[name] = new.classobj(name, (FirefoxSupportTest,), d)\nglobals().update(classes)\n\nif __name__ == \"__main__\":\n    if not os.path.isdir(args.platform):\n        os.mkdir(args.platform)\n    nose.core.run(argv=['--nocapture', \"-v\", \"--processes\", str(args.threads),\n                        \"--process-timeout\", \"1800\", __file__],\n                  plugins=[MultiProcess()])\n","repo_name":"santiycr/selenium-firefox-support-matrix","sub_path":"test_the_firefoxen.py","file_name":"test_the_firefoxen.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"91"} {"seq_id":"16819609438","text":"n = int(input())\nskill = list(map(int,input().split()))\n \nskill.sort()\nans = 1\nright = 1\nfor left in range(n-1):\n while rightkey:\n return self.search(p, q - 1, key, lists)\n else:\n return self.search(q + 1, r, key, lists)\n return False\n\na=[[1,2,8,9],[2,4,9,12],[4,7,10,13],[6,8,11,15]]\nprint(a)\nprint(Solution().Find(15,a))","repo_name":"HuXingkai/pythonProject","sub_path":"AlgorithmInPython/DensionSearch.py","file_name":"DensionSearch.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} {"seq_id":"34778003787","text":"class DListNode:\n    def __init__(self, val=None):\n        self.val = val\n        self.next = None\n        self.prev = None\n\n\nclass DList:\n    def __init__(self):\n        self.head = None\n        self.tail = None\n\n    def append(self, val):\n        newnode = DListNode(val)\n\n        if not self.tail:\n            self.tail = self.head = newnode\n            return\n\n        self.tail.next = newnode\n        newnode.prev = self.tail\n        self.tail = newnode\n\n    def prepend(self, val):\n        newnode = DListNode(val)\n\n        if not self.head:\n            self.tail = self.head = newnode\n            return\n\n        newnode.next = self.head\n        self.head.prev = newnode\n        self.head = newnode\n\n    def pop(self, index=0):\n        cur = self.head\n        i = 0\n\n        while i < index and cur:\n            cur = cur.next\n            i += 1\n\n        if i != index or cur is None:\n            raise IndexError\n\n        if cur.prev:\n            cur.prev.next = cur.next\n\n        if cur.next:\n            cur.next.prev = cur.prev\n\n        if cur == self.head:\n            self.head = cur.next\n        if cur == self.tail:\n            self.tail = cur.prev\n\n    def __repr__(self):\n        cur = self.head\n        s = []\n        while cur:\n            s.append(cur.val)\n            cur = cur.next\n        return ' '.join(map(str,s))\n\nclass SListNode:\n    def __init__(self, val=None):\n        self.val = val\n        self.next = None\n\n\nclass SList:\n    def __init__(self):\n        self.head = None\n\n    def append(self, val):\n        newnode = SListNode(val)\n\n        if not self.head:\n            self.head = newnode\n            return\n\n        tail = self.head\n        while tail.next != None:\n            tail = tail.next\n        tail.next = newnode\n\n    def prepend(self, val):\n        newnode = SListNode(val)\n        newnode.next = self.head\n        self.head = newnode\n\n    def pop(self, index=0):\n        dummy = SListNode()\n        dummy.next = self.head\n        cur = dummy\n        i = 0\n\n        while i < index and cur.next:\n            cur = cur.next\n            i += 1\n\n        if i != index:\n            raise IndexError\n\n        cur.next = cur.next.next\n        self.head = dummy.next\n\n    def __repr__(self):\n        cur = self.head\n        s = []\n        while cur:\n            s.append(cur.val)\n            cur = cur.next\n        return ' '.join(map(str,s))\n\nl = SList()\n\nfor x in range(10):\n    l.append(x)\n\nprint(l)\n\nfor x in range(10):\n    # prepend pushes each value onto the head, so these print in reverse\n    
l.prepend(x)\n\nprint(l)\n\nl.pop()\n\nprint(l)\nl.pop(2)\nprint(l)\n","repo_name":"vkumar62/EPIJudge","sub_path":"epi_judge_python/linkedlist.py","file_name":"linkedlist.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"16946429399","text":"import os\nimport time\nfrom lp_optimize import ModelLP, ModelLPA\nfrom datetime import datetime\n# from urllib.parse import urlparse\n\nfrom sqlalchemy import create_engine \nfrom sqlalchemy import Column, DateTime, Integer, Float \nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\nfrom sqlalchemy.dialects.postgresql import ARRAY\n\nfrom celery import Celery, Task\n\n\nCELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL', 'pyamqp://admin:pass@localhost:5672//')\nCELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND', 'rpc://')\nDATABASE_URL = os.environ.get('DATABASE_URL', 'postgresql://postgres:password@db:5432/postgres')\n\ncelery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)\n\nBase = declarative_base()\n\n\nclass DBTask(Task):\n _engine = None\n _Session = None\n\n def after_return(self, *args, **kwargs):\n if self._Session is not None:\n self._Session.remove()\n\n @property\n def engine(self):\n if self._engine is None:\n self._engine = create_engine(DATABASE_URL)\n return self._engine\n\n @property\n def Session(self):\n if self._Session is None:\n self._Session = scoped_session(sessionmaker(bind=self.engine))\n Base.metadata.create_all(self.engine)\n return self._Session\n\n\nclass Model(Base): \n __tablename__ = 'models'\n\n id = Column(Integer, primary_key=True)\n created_date = Column(DateTime)\n status = Column(Integer)\n starting_supply = Column(Float)\n price = Column(Float)\n shortage = Column(Float)\n salvage = Column(Float)\n planting_cost_na = Column(Float)\n processing_cost_na = Column(Float)\n planting_cost_sa = Column(Float)\n processing_cost_sa = Column(Float)\n yield_dist_na = Column(ARRAY(Float))\n yield_na = Column(ARRAY(Float))\n yield_dist_sa = Column(ARRAY(Float))\n yield_sa = Column(ARRAY(Float))\n demand_dist = Column(ARRAY(Float))\n demand = Column(ARRAY(Float))\n gross_margin = Column(Float)\n acres_na = Column(Float)\n acres_sa = Column(Float)\n revenue = Column(Float)\n finished_inventory = Column(Float)\n\n\n@celery.task(name='tasks.add')\ndef add(x: int, y: int):\n time.sleep(5)\n result = {\n \"x\": x,\n \"y\": y,\n \"sum\": x+y\n }\n return result\n\n@celery.task(name='tasks.optimize')\ndef optimize(**args):\n for k in args:\n if k == 'n_intervals':\n args[k] = int(args[k]) \n else:\n args[k] = float(args[k])\n problem = ModelLP(**args)\n status = problem.solve()\n result = problem.expected_result()\n result.update({\"status\": status})\n return result\n\n@celery.task(base=DBTask, bind=True, name='tasks.optimizeA')\ndef optimizeA(self, **args):\n for k in args:\n if isinstance(args[k], list):\n for i in range(len(args[k])):\n args[k][i] = float(args[k][i])\n else:\n args[k] = float(args[k])\n problem = ModelLPA(**args)\n status = problem.solve()\n result = problem.expected_result()\n result.update({\"status\": status})\n\n args_result = args.copy()\n args_result.update(result)\n\n # for k in args_result:\n # if isinstance(args_result[k], np.ndarray):\n # args_result[k] = args_result[k].tolist()\n\n new_model = Model(**args_result)\n\n session = self.Session()\n session.add(new_model) \n session.commit()\n\n return 
result\n    # return args\n","repo_name":"realtman/flask-vue-kubernetes","sub_path":"services/celery-queue/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"}{"seq_id":"13315273883","text":"import printy\nimport add_inf\nimport read\nimport complex\nimport deli\nimport sqlite3\n\n\ndef red_teacher():\n    read.tc()\n    deli.del_teacher()\n    complex.in_teacher()\n    # con = sqlite3.connect('school_base.db')\n    # with con:\n    #     cur = con.cursor()\n    #     y = printy.id()\n    #     cur.execute(f\"\"\"select course_id, first_name, last_name, mail, number_phone\n    #      from teachers where id = {y};\"\"\")\n    #     step = cur.fetchone()\n    #     con.commit()\n    #     vyvod = [*step]\n    #     x = 0\n    #     while 6 != x:\n    #         print(vyvod)\n    #         print(\"\"\"What do you want to change?\n    #         1. First name\n    #         2. Last name\n    #         3. Email\n    #         4. Phone number\n    #         5. Course\n    #         6. Exit\n    #         \"\"\")\n    #         x = printy.vybor()\n    #         if x == 1:\n    #             vyvod[1] = printy.text('a new first name')\n    #             cur.execute(\n    #                 f\"\"\"update teachers set 'first_name' = :slovo where id = {y}\"\"\", {'slovo': vyvod[1]})\n    #         elif x == 2:\n    #             vyvod[2] = printy.text('a new last name')\n    #             cur.execute(\n    #                 f\"\"\"update teachers set 'last_name' = :slovo where id = {y}\"\"\", {'slovo': vyvod[2]})\n    #         elif x == 3:\n    #             vyvod[3] = printy.text('a new email')\n    #             cur.execute(\n    #                 f\"\"\"update teachers set 'mail' = :slovo where id = {y}\"\"\", {'slovo': vyvod[3]})\n    #         elif x == 4:\n    #             vyvod[4] = printy.text('a new number')\n    #             cur.execute(\n    #                 f\"\"\"update teachers set 'number_phone' = :slovo where id = {y}\"\"\", {'slovo': vyvod[4]})\n    #         elif x == 5:\n    #             read.ci()\n    #             vyvod[0] = printy.integ('another course')\n    #             cur.execute(\n    #                 f\"\"\"update teachers set 'course_id' = :slovo where id = {y}\"\"\", {'slovo': vyvod[0]})\n    #     vyvod = tuple(vyvod)\n    #     print(vyvod)\n    #     add_inf.add_teacher(vyvod)\n\n\ndef red_student():\n    read.ss()\n    y = printy.id()\n    con = sqlite3.connect('school_base.db')\n    with con:\n        cur = con.cursor()\n        cur.execute(f\"\"\"select stream_id, s_first_name, s_last_name, s_mail, s_number_phone \n        from students where id = {y}\"\"\")\n        step = cur.fetchone()\n        vyvod = [*step]\n        cur.execute(f\"\"\"delete from students where id = {y}\"\"\")\n    x = 0\n    while 6 != x:\n        print(\"\"\"What do you want to change?\n        1. First name\n        2. Last name\n        3. Email\n        4. Phone number\n        5. Stream\n        6. Exit\n        \"\"\")\n        x = printy.vybor()\n        if x == 1:\n            vyvod[1] = printy.text('a new first name')\n        elif x == 2:\n            vyvod[2] = printy.text('a new last name')\n        elif x == 3:\n            vyvod[3] = printy.text('a new email')\n        elif x == 4:\n            vyvod[4] = printy.text('a new phone number')\n        elif x == 5:\n            vyvod[0] = printy.integ('a stream id')\n    add_inf.add_student(vyvod)\n\n\ndef red_stream():\n    read.stc()\n    y = printy.id()\n    con = sqlite3.connect('school_base.db')\n    cur = con.cursor()\n    with con:\n        cur.execute(f\"\"\"select teacher_id, number, date_start, date_finish\n        from streams where id = {y};\"\"\")\n        step = cur.fetchone()\n        vyvod = [*step]\n        cur.execute(f\"\"\"delete from streams where id = {y};\"\"\")\n    x = 0\n    while 5 != x:\n        print(\"\"\"What do you want to change?\n        1. Stream number\n        2. Start date\n        3. End date\n        4. Teacher\n        5. Exit\n        \"\"\")\n        x = printy.vybor()\n        if x == 1:\n            vyvod[1] = printy.text('a new stream number')\n        elif x == 2:\n            vyvod[2] = printy.text('the course start date')\n        elif x == 3:\n            vyvod[3] = printy.text('the course end date')\n        elif x == 4:\n            vyvod[0] = printy.integ('a teacher id')\n    add_inf.add_teacher(vyvod)\n\n\ndef red_course():\n    read.ci()\n    deli.del_course()\n    complex.in_course()\n    # y = printy.id()\n    # con = sqlite3.connect('school_base.db')\n    # cur = con.cursor()\n    # with con:\n    #     cur.execute(f\"\"\"select course_name from courses where id = {y};\"\"\")\n    #     step = cur.fetchone()\n    #     vyvod = [*step]\n    #     cur.execute(f\"\"\"delete from Course where id = {y};\"\"\")\n    #     x = 0\n    #     print(\"\"\"What do you want to change?\n    #     1. Course name\n    #     2. Exit\n    #     \"\"\")\n    #     x = printy.vybor()\n    #     while 2 != x:\n    #         if x == 1:\n    #             vyvod[0] = printy.text('a new course name')\n    #     add_inf.add_teacher(vyvod)\n","repo_name":"remegus/Git","sub_path":"python/school_base/redact.py","file_name":"redact.py","file_ext":"py","file_size_in_byte":5133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}{"seq_id":"39636393796","text":"import os\nimport sys\nimport random\nimport datetime\nimport argparse\nimport xml.etree.ElementTree as ET\nfrom typing import List, Tuple, Dict\n\nimport numpy as np\nfrom tqdm import tqdm\n\nimport torch\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom imantics import Mask, Polygons\n\nfrom model import get_model_instance_segmentation\nfrom dataset import TrainDataset, TestDataset\nfrom augmentations import Compose, ToTensor\nfrom utils import indent, fixed_seed\n\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--mode', type=str, default='train')\nparser.add_argument('--num_classes', type=int, default=23)\nparser.add_argument('--epochs', type=int, default=10)\nparser.add_argument('--batch_size', type=int, default=8)\nparser.add_argument('--cuda', type=bool, default=True)\nargs = parser.parse_args()\n\ndevice: str = 'cuda:0' if args.cuda else 'cpu'\nprint(device)\nclass_nums: Dict[int, str] = {\n    1:'sidewalk_blocks', 2: 'alley_damaged', 3:'sidewalk_damaged', 4: 'caution_zone_manhole', 5: 'braille_guide_blocks_damaged', \n    6: 'alley_speed_bump', 7: 'roadway_crosswalk', 8: 'sidewalk_urethane', 9: 'caution_zone_repair_zone', 10: 'sidewalk_asphalt', \n    11: 'sidewalk_other', 12: 'alley_crosswalk', 13: 'caution_zone_tree_zone', 14: 'caution_zone_grating', 15: 'roadway_normal',\n    16: 'bike_lane', 17: 'caution_zone_stairs', 18: 'alley_normal', 19: 'sidewalk_cement', 20: 'braille_guide_blocks_normal', \n    21: 'sidewalk_soil_stone', 22: 'alley',\n}\nclass_names: Dict[str, int] = {v: k for k, v in class_nums.items()}\n\naugmentations = {\n    'train': Compose([\n        ToTensor(),\n    ]),\n    'test': Compose([\n        ToTensor(),\n    ])\n}\n\ndef collate_fn(batch: torch.Tensor) -> Tuple:\n    return tuple(zip(*batch))\n\nif args.mode != 'test':\n    trainset = TrainDataset(class_names, augmentations['train'])\n    print(f'Size: trainset: {len(trainset):,}')\n    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=collate_fn)\n\ntestset = TestDataset(augmentations['test'])\ntest_loader = DataLoader(testset, batch_size=1, shuffle=False, collate_fn=collate_fn)\n\n\ndef train_fn(\n    model: torch.nn, \n    data_loader: DataLoader, \n    optimizer: optim, \n    device: torch.device, \n    epoch: int):\n    model.train()\n    start_time = datetime.datetime.now()\n    
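# running count of samples seen so far this epoch; used below to report progress as a percentage of the dataset\n    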
num_images: int = 0\n for i, (images, targets) in enumerate(data_loader):\n images = list(image.to(device) for image in images)\n targets = [{k: v.to(device) for k, v in t.items()} for t in targets]\n images = torch.stack(images)\n num_images += len(images)\n \n optimizer.zero_grad()\n loss_dict: Dict[str, torch.Tensor] = model(images, targets)\n loss: float = sum(loss for loss in loss_dict.values())\n \n loss.backward()\n optimizer.step()\n\n if (i+1) % 10 == 0:\n print('-'*50)\n print(f'Epoch {epoch+1}[{len(data_loader.dataset):,}/{(num_images/len(data_loader.dataset))*100:.2f}%] '\n f'- Elapsed time: {datetime.datetime.now() - start_time}\\n'\n f' - loss: classifier={loss_dict[\"loss_classifier\"]:.6f}, box_reg={loss_dict[\"loss_box_reg\"]:.6f}, '\n f'objectness={loss_dict[\"loss_objectness\"]:.6f}, rpn_box_reg={loss_dict[\"loss_rpn_box_reg\"]:.6f}')\n \n\ndef test_fn(\n model: torch.nn, \n data_loader: DataLoader, \n class_nums: Dict,\n device: torch.device):\n image_ids = [image_id.split('.')[1][-17:] for image_id in data_loader.dataset.image_ids]\n xml_root = ET.Element('predictions')\n model.eval()\n batch_size = data_loader.batch_size\n with torch.no_grad():\n for i, (images, _) in tqdm(enumerate(data_loader)):\n images = list(image.to(device) for image in images)\n outputs = model(images)\n for j, output in enumerate(outputs):\n image_name = image_ids[i*batch_size+j]\n xml_image = ET.SubElement(xml_root, 'image', {'name': image_name})\n\n masks = output['masks'].detach().cpu().numpy()\n labels = output['labels'].detach().cpu().numpy()\n scores = output['scores'].detach().cpu().numpy()\n\n for mask, label, score in zip(masks, labels, scores):\n mask_bin = np.where(mask[0] > 0.1, True, False)\n polygons = Mask(mask_bin).polygons()\n points = polygons.points\n point = ''.join([str(p[0]) + ',' + str(p[1]) +';' for p in points[0]])\n attribs = {\n 'class_name': class_nums[label],\n 'score': str(float(score)), \n 'polygon': point,\n }\n ET.SubElement(xml_image, 'predict', attribs)\n \n indent(xml_root)\n tree = ET.ElementTree(xml_root)\n tree.write('./output/prediction.xml')\n print('Save predicted labels.xml...\\n')\n\n\n\n\ndef main():\n fixed_seed(42)\n if args.mode == 'train':\n model = get_model_instance_segmentation(args.num_classes).to(device)\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = optim.SGD(params, lr=1e-3, momentum=0.9, weight_decay=1e-4)\n for epoch in range(args.epochs):\n train_fn(model, train_loader, optimizer, device, epoch)\n torch.save(model.state_dict(), f'./weight/test{epoch}.pth')\n else:\n model = get_model_instance_segmentation(args.num_classes).to(device)\n model.load_state_dict(torch.load('./weight/test1.pth'))\n model.eval()\n\n test_fn(model, test_loader, class_nums, device)\n \n\nif __name__ == '__main__':\n main()\n\n","repo_name":"unerue/competition","sub_path":"aichallenge/task08/run_baseline.py","file_name":"run_baseline.py","file_ext":"py","file_size_in_byte":5733,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"16480341176","text":"from argparse import ArgumentParser\r\nimport requests \r\nimport sys\r\nimport os\r\n\r\ndef main(argv):\r\n parser = ArgumentParser()\r\n parser.add_argument(\"-i\", \"--ip\", dest=\"slsIp\", help=\"IP address of SLS gateway\", default=\"192.168.1.2\")\r\n parser.add_argument(\"-f\", \"--folder\", dest=\"backupFolder\", help=\"Name of backup folder\", default=\"backup\")\r\n args = parser.parse_args()\r\n\r\n slsIp = 
args.slsIp\r\n    backupFolder = args.backupFolder\r\n\r\n    print(\"slsIp: {}, folder: {}\".format(slsIp, backupFolder))\r\n    process_folder('/', slsIp, backupFolder)\r\n\r\ndef process_folder(path, slsIp, backupFolder):\r\n    r = requests.get('http://{}/api/files?path={}'.format(slsIp, path))\r\n    json = r.json()\r\n    if json == None:\r\n        print(\"Error while retrieving data, response is: {}\".format(r.text))\r\n        return\r\n    \r\n    if \"success\" not in json:\r\n        print(\"Response was unsuccessful.\")\r\n        return\r\n\r\n    folderItems = json['result']\r\n    for folderItem in folderItems:\r\n        print(\"- {}\".format(folderItem))\r\n        name = folderItem[\"name\"]\r\n        folderRealPath = os.path.realpath(backupFolder)\r\n\r\n        if folderItem['is_dir'] == True:\r\n            process_folder(name, slsIp, backupFolder)\r\n            continue\r\n        else:\r\n            cleanFolderPath = folderRealPath.replace(\"/\", os.sep)\r\n            filePath = cleanFolderPath + name.replace(\"/\", os.sep)\r\n            print(filePath)\r\n\r\n            r = requests.get('http://{}/api/files?path={}'.format(slsIp, name))\r\n            r.encoding = 'utf-8'\r\n            with open(filePath, 'w') as file:\r\n                file.write(r.text)\r\n\r\nif __name__ == '__main__':\r\n    main(sys.argv)\r\n","repo_name":"monster1025/home-assistant-alt","sub_path":"sls/docker/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"}{"seq_id":"70914049583","text":"class Solution(object):\n    def plusOne(self, digits):\n        \"\"\"\n        :type digits: List[int]\n        :rtype: List[int]\n        \"\"\"\n        carry = 1\n        for i in xrange(len(digits)-1, -1, -1):\n            tmp = carry + digits[i]\n            carry = tmp / 10\n            digits[i] = tmp % 10\n            if carry == 0:\n                break\n        return [1] + digits if carry == 1 else digits\n","repo_name":"GeorgyZhou/Leetcode-Problem","sub_path":"lc66.py","file_name":"lc66.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}{"seq_id":"28198228653","text":"import pandas as pd\nimport numpy as np\nfrom sklearn import tree\nfrom sklearn.tree import _tree\nimport graphviz\nfrom sklearn.ensemble import RandomForestClassifier\nfrom lifelines.statistics import multivariate_logrank_test\nfrom .metrics import *\nfrom .survival_analysis import *\n\n\ndef tree_model_with_performance(train_X_y, min_sample_leaf, max_depth=None, valid_X_y=None, class_weight=None):\n    X, y = train_X_y\n    clf = tree.DecisionTreeClassifier(class_weight=class_weight, min_samples_split=round(min_sample_leaf*5/3), min_samples_leaf=min_sample_leaf, max_depth=max_depth, random_state=1234).fit(X, y)\n    if valid_X_y is None:\n        vX = X\n        vy = y\n    else:\n        vX, vy = valid_X_y\n    pred = clf.predict(vX)\n    prob = clf.predict_proba(vX)[:, 1]\n    \n    performance = compute_performance(vy, pred, prob)\n    \n    fi = pd.DataFrame({'feature': X.columns, 'importance': clf.feature_importances_}).sort_values(by='importance', ascending=False)\n    fi = fi.loc[fi['importance'] > 0]\n    \n    dot_data = tree.export_graphviz(clf, out_file=None, feature_names=X.columns, proportion=True, class_names=['0', '1'], filled=True, rounded=True, special_characters=True) \n    graph = graphviz.Source(dot_data)\n    \n    result = dict()\n    result['model'] = clf\n    result['performance'] = performance\n    result['feature importance'] = fi\n    result['tree graph'] = graph\n    result['columns'] = X.columns\n\n    return result\n\n\ndef sequential_tree_modeling(data_list, y, min_sample_leaf, max_depth=None, valid_X_list=None, valid_y=None, 
class_weight=None):\n models = list()\n for i, d in enumerate(data_list):\n if valid_X_list is None and valid_y is None:\n tmp = tree_model_with_performance([d, y], min_sample_leaf, max_depth, class_weight=class_weight)\n else:\n tmp = tree_model_with_performance([d, y], min_sample_leaf, max_depth, [valid_X_list[i], valid_y], class_weight)\n models.append(tmp)\n \n return models\n\n\ndef tree_to_code(tree, feature_names):\n global script \n script = list()\n tree_ = tree.tree_\n feature_name = [feature_names[i] if i != _tree.TREE_UNDEFINED else \"undefined!\" for i in tree_.feature]\n n_all = tree_.n_node_samples[0]\n \n def recurse(node, depth):\n indent = depth\n impurity = \"{0:.3f}\".format(tree_.impurity[node])\n values = tree_.value[node][0]\n score = float(\"{0:.3f}\".format(values[1] / sum(values)))\n n = float(\"{0:.1f}\".format(tree_.n_node_samples[node] / n_all * 100))\n if tree_.feature[node] != _tree.TREE_UNDEFINED:\n name = feature_name[node]\n threshold = \"{0:.2f}\".format(tree_.threshold[node])\n script.append(\"{}, {} <= {}, {}, {}%, {}, {}\".format(indent, name, threshold, impurity, n, values, score))\n recurse(tree_.children_left[node], depth + 1)\n script.append(\"{}, {} > {}, {}, {}%, {}, {}\".format(indent, name, threshold, impurity, n, values, score))\n recurse(tree_.children_right[node], depth + 1)\n else:\n script.append(\"{}, {}, {}%, {}, {}\".format(indent, impurity, n, values, score))\n \n recurse(0, 1)\n \n return script, [feature_name[0], \"{0:.2f}\".format(tree_.threshold[0])]\n\n\ndef tree_rule_summariser(script):\n line_lens = list(map(lambda x: len(x.split(',')), script))\n \n rules = list()\n for i, l in enumerate(line_lens):\n if l == 5:\n last = script[i].split(', ')\n rule = dict()\n rule['impurity'] = last[1]\n rule['ratio'] = last[2]\n samples = last[3].strip('[').strip(']').strip(' ')\n blank = samples.index(' ')\n rule['sample0'] = float(samples[:blank-1])\n rule['sample1'] = float(samples[blank+1:])\n rule['score'] = float(last[-1])\n rule['condition'] = list()\n j = i - 1\n current = last[0]\n while True:\n con = script[j].split(', ')\n if int(con[0]) < int(current) and len(con) == 6:\n current = con[0]\n try:\n where = con[1].index('<=')\n var = con[1][:where-1]\n direction = 'max'\n except ValueError:\n where = con[1].index('>')\n var = con[1][:where-1]\n tmp = con[1][where+1:] + ' < ' + var\n con[1] = tmp\n direction = 'min'\n in_not = np.array(list(map(lambda x: var in x, rule['condition'])))\n try:\n which = np.array(range(len(in_not)))[in_not][0]\n except IndexError:\n which = -1\n if sum(in_not) == 0:\n rule['condition'].append(con[1])\n else:\n r = rule['condition'][which]\n if direction == 'min':\n if ' < ' in r:\n if float(r[:r.index(' < ')]) < float(con[1][:con[1].index(' < ')]):\n rr = con[1][:con[1].index(' < ')] + ' < ' + r[r.index(' < '):]\n rule['condition'][which] = rr\n else:\n rule['condition'][which] = con[1][:con[1].index(' < ')] + ' < ' + r\n elif direction == 'max':\n if '<=' in r:\n if float(r[r.index('<=')+2:]) > float(con[1][con[1].index('<=')+2:]):\n rr = r[:r.index('<=')-1] + ' <= ' + con[1][con[1].index('<=')+2:]\n rule['condition'][which] = rr\n else:\n rule['condition'][which] = r + ' <= ' + con[1][con[1].index('<=')+2:]\n if int(current) == 1:\n break\n j -= 1\n rule['condition'] = ', '.join(rule['condition'])\n rules.append(rule)\n \n return pd.DataFrame(rules)\n\n\ndef rule_synthesiser(rule_df, cutoff=0.5):\n rule_df['class'] = 0\n rule_df['class'].loc[rule_df['score'] >= 0.5] = 1\n \n i = 0\n rm_idx = list()\n 
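# merge pass (sketch of the idea): adjacent same-class rules that differ in a single boundary, e.g. 'x <= 2, y <= 5' and 'x <= 2, 5 < y <= 8' (hypothetical features), collapse into 'x <= 2, y <= 8'\n    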
while i < len(rule_df) - 1:\n if rule_df['class'][i] == rule_df['class'][i+1]:\n first = rule_df['condition'][i].split(', ')\n second = rule_df['condition'][i+1].split(', ')\n if len(first) == len(second):\n diff = list(filter(lambda x: x[0] != x[1], list(zip(first, second))))\n if len(diff) == 1:\n div = list(map(lambda x: list(map(lambda y: y.strip(' ').strip('= '), x.split(' <'))), diff[0]))\n if len(div[0]) == len(div[1]):\n if len(div[0]) == 2:\n rule_df['condition'][i] = ', '.join(first[1:])\n else:\n if div[0][0] == div[1][2]:\n added = div[1][0] + ' < ' + div[1][1] + ' <= ' + div[0][2]\n elif div[0][2] == div[1][0]:\n added = div[0][0] + ' < ' + div[0][1] + ' <= ' + div[1][2]\n rule_df['condition'][i] = ', '.join([added] + first[1:]) \n else:\n if len(div[0]) == 3:\n base = div[0]\n com = div[1]\n elif len(div[1]) == 3:\n base = div[1]\n com = div[0]\n in_tf = list(map(lambda x: x in com, base))\n if in_tf[0] == True:\n added = ' <= '.join(base[1:])\n else:\n added = ' < '.join(base[:-1])\n rule_df['condition'][i] = ', '.join([added] + first[1:])\n def rewrite_row(cols, ii):\n for c in cols:\n if c == 'ratio':\n tmp = float(rule_df[c][ii][:-1]) + float(rule_df[c][ii+1][:-1])\n rule_df[c][ii] = \"{0:.1f}\".format(tmp) + '%'\n elif c == 'score':\n rule_df['score'][ii] = rule_df['sample1'][ii] / (rule_df['sample0'][ii] + rule_df['sample1'][ii])\n elif c == 'impurity':\n rule_df['impurity'][i] = \"{0:.3f}\".format(rule_df['score'][i] * (1 - rule_df['score'][i]) * 2)\n else:\n rule_df[c][ii] = float(rule_df[c][ii]) + float(rule_df[c][ii+1])\n rewrite_row(['ratio', 'sample0', 'sample1', 'score', 'impurity'], i)\n rm_idx.append(i+1)\n i += 1\n i += 1\n return rule_df.drop(rm_idx).reset_index(drop=True)\n\n\ndef tree_scorer(rule_df, root, cutoff=0.5):\n while True:\n new_df = rule_synthesiser(rule_df, cutoff)\n if len(new_df) == len(rule_df):\n root_rule = ' <= '.join(root)\n var_cut_i = root_rule.index('=')\n i = 0\n left_con = [[1], [1]]\n last_class = -1\n while True:\n if last_class == rule_df['class'][i]:\n left_con[last_class][-1] += 1\n else:\n last_class = rule_df['class'][i]\n left_con[last_class].append(1)\n \n if root_rule in rule_df['condition'][i]:\n i += 1\n elif root_rule[:var_cut_i] in rule_df['condition'][i]:\n ii = rule_df['condition'][i].index(root_rule[:var_cut_i]) + var_cut_i\n try:\n iii = rule_df['condition'][i][ii:].index(', ') + ii\n except ValueError:\n iii = len(rule_df['condition'][i])\n if float(root_rule[var_cut_i+1:]) >= float(rule_df['condition'][i][ii+2:iii]):\n i += 1\n else:\n break\n else:\n break\n mid_last = i - 1\n \n last_class = -1\n right_con = [[1], [1]]\n for i in range(mid_last+1, len(rule_df)):\n if last_class == rule_df['class'][i]:\n right_con[last_class][-1] += 1\n else:\n last_class = rule_df['class'][i]\n right_con[last_class].append(1)\n \n def find_class_maxlen(con):\n maxlen = list(map(lambda x: max(x), con))\n idx = maxlen.index(max(maxlen))\n return (idx, maxlen[idx])\n left_con = find_class_maxlen(left_con)\n right_con = find_class_maxlen(right_con)\n \n var_points = find_split_points(rule_df)\n bunja = np.log(sum(map(lambda x: len(var_points[x]), var_points.keys())))\n score = 10\n if left_con[0] != right_con[0]:\n score += min([(left_con[1] + right_con[1]) * 10, 100])\n else:\n score += abs(left_con[1] - right_con[1]) * 10\n score /= (bunja + len(rule_df))\n \n return rule_df.sort_values(by='class')[['condition', 'score', 'impurity', 'ratio', 'class']], score\n else:\n rule_df = new_df\n\n\ndef find_split_points(rule_df):\n 
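# collects every cutoff used in the rule conditions, keyed by feature, de-duplicated and sorted, e.g. {'age': [30.0, 45.5]} for hypothetical rules 'age <= 30' and '30 < age <= 45.5'\n    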
var_points = dict()\n for i in range(len(rule_df)):\n div_condition = rule_df['condition'][i].split(', ')\n for j in range(len(div_condition)):\n div = list(map(lambda x: x.strip(' ').strip('= '), div_condition[j].split('<')))\n try:\n num = float(div[0])\n var_i = 1\n except ValueError:\n var_i = 0\n if div[var_i] not in var_points.keys():\n var_points[div[var_i]] = list()\n for k in range(len(div)):\n if k != var_i:\n var_points[div[var_i]].append(float(div[k]))\n for var in var_points.keys():\n var_points[var] = sorted(set(var_points[var]))\n return var_points\n \n\ndef count_by_cutoff(data_df, rule_df):\n var_points = find_split_points(rule_df)\n \n l = len(data_df)\n cnts = list()\n for var in var_points.keys():\n for i in range(len(var_points[var])):\n tmp = sum(data_df[var] <= var_points[var][i])\n cnts.append([var, var_points[var][i], tmp, l - tmp])\n cnts = pd.DataFrame(cnts, columns=['feature', 'cutoff', ' <= ', ' > '])\n \n return cnts\n\n\ndef random_forest_with_performance(train_X_y, n_estimators, max_depth, min_sample_leaf, ratio_features=0.8, valid_X_y=None, random_state=1234):\n X, y = train_X_y\n if valid_X_y is None:\n vX = X\n vy = y\n else:\n vX, vy = valid_X_y\n rf = RandomForestClassifier(class_weight='balanced_subsample', max_features=ratio_features, \n n_estimators=n_estimators, max_depth=max_depth, \n min_samples_leaf=min_sample_leaf, random_state=random_state).fit(X, y)\n pred = rf.predict(vX)\n prob = rf.predict_proba(vX)[:, 1]\n \n performance = compute_performance(vy, pred, prob)\n \n features = pd.DataFrame({'feature': X.columns, 'importance': rf.feature_importances_}).sort_values(by='importance', ascending=False)\n features = features.loc[features['importance'] > 0.0]\n\n result = dict()\n result['model'] = rf\n result['feature importance'] = features\n result['performance'] = performance\n result['columns'] = X.columns\n\n return result\n\n\ndef select_important_variables(Xy, topN, n_estimators, max_depth, min_sample_leaf, ratio_features):\n rr = random_forest_with_performance(Xy, n_estimators, max_depth, min_sample_leaf, ratio_features)\n return rr['feature importance']['feature'].values[:topN]\n\n\ndef train_and_filter_models(train_Xy, col_list, depth_list, sample_leaf, min_auc, event, duration, max_pvalue, valid_X=None):\n X, y = train_Xy\n if valid_X is None:\n vX = X\n else:\n vX = valid_X\n X_list = list(map(lambda x: X[x], col_list))\n models = list()\n for d in depth_list:\n mds = sequential_tree_modeling(X_list, y, sample_leaf, d, class_weight='balanced')\n mds = list(filter(lambda x: x['performance']['AUC'] >= min_auc, mds))\n preds = list(map(lambda x: x['model'].predict(vX[x['columns']]), mds))\n ps = list(map(lambda x: logrank_pvalue(duration, x, event), preds))\n mds = list(filter(lambda x: x[1] <= max_pvalue, zip(mds, ps)))\n models += list(map(lambda x: x[0], mds))\n print(len(models))\n \n return models\n\n\ndef select_models(models, min_score, print_score=False):\n rule_list = list()\n score_df = list()\n for md in models:\n tree = md['model']\n cols = md['columns']\n rules, root = tree_to_code(tree, cols)\n rules = tree_rule_summariser(rules)\n rules, score = tree_scorer(rules, root)\n rule_list.append(rules)\n score_df.append(score)\n score_df = pd.DataFrame({'model_score': score_df}).sort_values(by='model_score', ascending=False)\n score_df = score_df.loc[score_df['model_score'] >= min_score]\n print(len(score_df))\n\n if print_score:\n print(score_df)\n\n selected_models = list()\n for i, j in enumerate(score_df.index):\n 
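# attach the synthesised rule table and the model score to each model that cleared min_score\n        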
models[j]['rules'] = rule_list[j]\n models[j]['model_score'] = score_df.iloc[i]['model_score']\n selected_models.append(models[j])\n\n return selected_models","repo_name":"gowun/BladderCancer_AMC","sub_path":"ModelingTools/tree_modeling.py","file_name":"tree_modeling.py","file_ext":"py","file_size_in_byte":14052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"13805811451","text":"from __future__ import annotations\nimport typing as t\n\nfrom kivy.lang import Builder\nfrom kivy.properties import ListProperty, NumericProperty, ObjectProperty, StringProperty\nfrom kivy.uix.button import Button\nfrom kivy.uix.screenmanager import Screen\n\nfrom dcdb.game import Game\nfrom dcdb.input import EndTurnInput, SelectionInput\n\nimport dcgui.uix.opponent\nfrom dcgui.uix.option_menu import OptionMenu\n\n\nBuilder.load_string('''\n:\n opponent_layout: opponent_layout\n\n BoxLayout:\n orientation: 'horizontal'\n size_hint: .9, .3\n pos_hint: {'center_x': .5, 'top': .95}\n \n OpponentsView:\n id: opponent_layout\n size_hint_x: .5\n \n BoxLayout:\n orientation: 'horizontal'\n size_hint: .9, .3\n pos_hint: {'center_x': .5, 'center_y': .5}\n padding: dp(10)\n spacing: dp(10)\n \n BoxLayout:\n spacing: dp(10)\n size_hint_x: .2\n \n OptionStackView:\n viewer: root.game.players[0]\n cards: root.game.main_deck\n \n OptionStackView:\n viewer: root.game.players[0]\n cards: root.game.weakness_stack\n \n BoxLayout:\n orientation: 'vertical'\n size_hint_x: .6\n \n BoxLayout:\n id: option_layout\n size_hint_y: .25\n \n Label:\n id: hint_label\n font_size: sp(20)\n \n LineUp:\n game: root.game\n cards: root.game.line_up\n size_hint_y: .5\n \n Label:\n text: 'Power: ' + str(root.power)\n font_size: sp(20)\n size_hint_y: .25\n \n BoxLayout:\n spacing: dp(10)\n size_hint_x: .2\n \n OptionStackView:\n viewer: root.game.players[0]\n cards: root.game.sv_stack\n on_option: root.process(self.option)\n \n OptionStackView:\n viewer: root.game.players[0]\n cards: root.game.kick_stack\n on_option: root.process(self.option)\n \n PlayerField:\n game: root.game\n player: root.game.players[0]\n player_name: root.player_names[0]\n size_hint: .9, .3\n pos_hint: {'center_x': .5, 'y': .05}\n player_name: 'Player 1'\n''')\n\n\nclass GameScreen(Screen):\n\n game: Game = ObjectProperty()\n player_names: t.List[str] = ListProperty()\n turn_player_name: t.Optional[str] = StringProperty(allownone=True)\n power: int = NumericProperty()\n end_turn_button = ObjectProperty()\n option_menu = ObjectProperty()\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.end_turn_button = Button(text='End Turn', on_press=lambda btn: self.process(0))\n self.option_menu = OptionMenu()\n self.option_menu.bind(on_select=self._on_menu_option)\n\n self.game.register(self)\n\n self.opponent_layout.players = self.game.players[1:]\n self.opponent_layout.names = self.player_names[1:]\n\n self.game.start()\n\n def _on_menu_option(self, menu, option):\n menu.dismiss()\n self.game.process(option)\n\n def process(self, option):\n self.game.process(option)\n\n def observe_options(self, game):\n try:\n end_turn = next(option for option in game.options if isinstance(option, EndTurnInput))\n except StopIteration:\n end_turn = None\n\n if end_turn and not self.end_turn_button.parent:\n self.ids.option_layout.add_widget(self.end_turn_button)\n elif not end_turn and self.end_turn_button.parent:\n self.ids.option_layout.remove_widget(self.end_turn_button)\n\n menu_options = [\n option for option in 
game.options\n if isinstance(option, SelectionInput) and isinstance(option.selection, str)\n ]\n if menu_options:\n self.option_menu.hint = self.game.hint\n self.option_menu.options = menu_options\n self.option_menu.open()\n\n def observe_power(self, game):\n self.power = game.power\n\n def observe_turn_player(self, game):\n if self.game.turn_player:\n self.turn_player_name = self.player_names[self.game.turn_player.index]\n else:\n self.turn_player_name = None\n\n def observe_hint(self, game):\n if self.game.hint:\n self.ids.hint_label.text = self.game.hint\n elif self.turn_player_name:\n self.ids.hint_label.text = self.turn_player_name\n else:\n self.ids.hint_label.text = ''\n","repo_name":"noahcgreen/dcdb","sub_path":"dcgui/screens/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"28835766802","text":"\nimport os\nimport json\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker, scoped_session\n\nfrom projman_filler.samplesheet import Samplesheet\nfrom projman_filler.interop_service import InteropService\nfrom projman_filler.lane_level_statistics import calculate_lane_statistics\nfrom projman_filler.sample_level_statistics import calculate_sample_statistics\nfrom projman_filler.exceptions import FlowcellAlreadyInDb\nfrom projman_filler.models.db_models import FlowcellRunfolder\n\nfrom projman_filler.repositories.sample_results_repo import SampleResultRepo\nfrom projman_filler.repositories.flowcell_lane_results_repo import FlowcellLaneResultsRepo\nfrom projman_filler.repositories.flowcell_runfolder_repo import FlowcellRunfolderRepo\n\n\nclass App(object):\n\n def __init__(self, db_connection_string, debug=False, flowcell_runfolder_repo=None,\n flowcell_lane_results_repo=None, sample_results_repo=None):\n\n engine = create_engine(db_connection_string, echo=debug)\n session_factory = scoped_session(sessionmaker())\n session_factory.configure(bind=engine)\n\n if not flowcell_lane_results_repo:\n self.flowcell_lane_results_repo = FlowcellLaneResultsRepo(session_factory)\n\n if not flowcell_runfolder_repo:\n self.flowcell_runfolder_repo = FlowcellRunfolderRepo(session_factory)\n\n if not sample_results_repo:\n self.sample_results_repo = SampleResultRepo(session_factory)\n\n @staticmethod\n def get_reads_and_cycles(stats_json):\n reads_and_cycles = {}\n for read_info in stats_json[\"ReadInfosForLanes\"][0][\"ReadInfos\"]:\n if not read_info[\"IsIndexedRead\"]:\n reads_and_cycles[read_info[\"Number\"]] = read_info[\"NumCycles\"]\n return reads_and_cycles\n\n def parse_rundate_from_runfolder_name(self, runfolder_name):\n return os.path.basename(runfolder_name).split(\"_\")[0]\n\n def insert_runfolder_into_db(self, runfolder, bcl2fastq_stats, force=False):\n\n stats_json = os.path.join(runfolder, bcl2fastq_stats, 'Stats.json')\n\n with open(stats_json, 'r') as f:\n stats_json = json.load(f)\n\n flowcell_name = stats_json[\"Flowcell\"]\n conversion_results = stats_json[\"ConversionResults\"]\n\n if self.flowcell_runfolder_repo.contains_flowcell(flowcell_name):\n if force:\n print(\"Found the specified runfolder in the db, but got a force option, so will proceed to \"\n \"delete it and insert new values.\")\n self.flowcell_lane_results_repo.delete_by_flowcell_name(flowcell_name)\n self.flowcell_runfolder_repo.delete_by_flowcell_name(flowcell_name)\n self.sample_results_repo.delete_by_flowcell_name(flowcell_name)\n else:\n raise 
FlowcellAlreadyInDb\n\n interop = InteropService(runfolder)\n densities = interop.get_densities()\n error_rates = interop.get_error_rates()\n q30s = interop.get_q30()\n reads_and_cycles = self.get_reads_and_cycles(stats_json)\n\n lane_stats = calculate_lane_statistics(flowcell_name, conversion_results, reads_and_cycles,\n error_rates, densities, q30s)\n\n self.flowcell_lane_results_repo.add(list(lane_stats))\n\n samplesheet_file = os.path.join(runfolder, \"SampleSheet.csv\")\n samplesheet = Samplesheet(samplesheet_file)\n\n sample_stats = calculate_sample_statistics(flowcell_name, conversion_results, reads_and_cycles, samplesheet)\n self.sample_results_repo.add(list(sample_stats))\n\n runfolder_name = os.path.basename(runfolder)\n runfolder_date = self.parse_rundate_from_runfolder_name(runfolder)\n flowcell_runfolder = FlowcellRunfolder(flowcell_id=flowcell_name,\n runfolder_name=runfolder_name,\n run_date=runfolder_date)\n self.flowcell_runfolder_repo.add(flowcell_runfolder)\n","repo_name":"matrulda/projman_filler_old","sub_path":"projman_filler/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13668061910","text":"# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\nfrom django.contrib.contenttypes.models import ContentType\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n\n db.rename_table('academics_user', 'general_fafuser')\n # db.rename_table('academics_user', 'general_user')\n db.rename_table('academics_usermeta', 'general_usermeta')\n db.rename_table('academics_usermetatype', 'general_usermetatype')\n\n db.rename_column('academics_course_professors', 'user_id', 'fafuser_id')\n # db.rename_column('general_usermeta', 'user_id', 'fafuser_id')\n\n if not db.dry_run:\n # For permissions to work properly after migrating\n ContentType.objects.filter(app_label='academics', model='user').update(app_label='general', model='fafuser')\n # ContentType.objects.filter(app_label='academics', model='user').update(app_label='general')\n ContentType.objects.filter(app_label='academics', model='usermeta').update(app_label='general')\n ContentType.objects.filter(app_label='academics', model='usermetatype').update(app_label='general')\n\n def backwards(self, orm):\n db.rename_table('general_fafuser', 'academics_user')\n # db.rename_table('general_user', 'academics_user')\n db.rename_table('general_usermeta', 'academics_usermeta')\n db.rename_table('general_usermetatype', 'academics_usermetatype')\n\n db.rename_column('academics_course_professors', 'fafuser_id', 'user_id')\n # db.rename_column('general_usermeta', 'fafuser_id', 'user_id')\n\n if not db.dry_run:\n # For permissions to work properly after migrating\n ContentType.objects.filter(app_label='general', model='fafuser').update(app_label='academics', model='user')\n # ContentType.objects.filter(app_label='general', model='user').update(app_label='academics')\n ContentType.objects.filter(app_label='general', model='usermeta').update(app_label='academics')\n ContentType.objects.filter(app_label='general', model='usermetatype').update(app_label='academics')\n\n models = {\n u'general.fafuser': {\n 'Meta': {'object_name': 'FAFUser'},\n 'auth_user_id': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': 
'75'}),\n 'group': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '15'}),\n 'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),\n 'surname': ('django.db.models.fields.CharField', [], {'max_length': '31'})\n },\n u'general.usermeta': {\n 'Meta': {'object_name': 'UserMeta'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'meta': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['general.UserMetaType']\"}),\n 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['general.FAFUser']\"}),\n 'value': ('django.db.models.fields.TextField', [], {})\n },\n u'general.usermetatype': {\n 'Meta': {'object_name': 'UserMetaType'},\n 'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'key': ('django.db.models.fields.CharField', [], {'max_length': '31'}),\n 'multiple': ('django.db.models.fields.BooleanField', [], {}),\n 'type': ('django.db.models.fields.CharField', [], {'max_length': '31'})\n }\n }\n\n complete_apps = ['general']\n","repo_name":"TUM-FAF/FAFSite","sub_path":"fafsite/general/migrations/0002_auto__add_usermeta__add_usermetatype__add_fafuser.py","file_name":"0002_auto__add_usermeta__add_usermetatype__add_fafuser.py","file_ext":"py","file_size_in_byte":3998,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"90"} +{"seq_id":"12321808370","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPurpose: Scrape and format the Federal Reserves Beige book data for text analysis\nInputs: The page_root for the Federal Reserve URL\nOutputs: A dataframe of the data by Beigebook subsection\n\"\"\"\n\n#%%\n#Function: yearURL()\n#Purpose: Returns the specified URL for a year in the Federal Reserves Beige book archives.\n#Inputs: A year in which data is available\n#Output: The URL which has links to the ~8 Beige books written prior to the FOMC meetings\ndef yearURL(year):\n global yearDict\n global page_root\n yearDict = dict()\n page_root = 'https://www.federalreserve.gov'\n page_link = os.path.join(page_root,\"monetarypolicy/beige-book-archive.htm\").replace(\"\\\\\", \"/\")\n #Request archive url\n page_response = requests.get(page_link, timeout=5)\n #Fetch archive url content\n page_content = BeautifulSoup(page_response.content, \"html.parser\")\n #Isolate links\n list_links = page_content.find('div', {'class' : 'panel-body'}).find_all('li')\n #Define regular expression\n re_link = re.compile('.*)\".*')\n #Loop through each link to access month url\n for i in range(0, len(list_links)):\n #Isolate and clean individual links\n link = str(list_links[i]).replace('\\n','').replace('
  • ','').replace('
  • ','')\n #Extract next level\n nllink = re_link.match(link).groupdict()['nextlevel']\n #Define year level URL\n yearlink = os.path.join(page_root, nllink.strip('/')).replace('\\\\', '/')\n yearDict[int(re.findall(r'\\d+', yearlink)[0])] = [yearlink]\n #Return specified year URL\n if(1995 < year < 2019):\n return yearDict[year][0]\n else: print(\"That data is not available. The archives are currently for 1996 - 2018\")\n \n \n#%% \n \n#Function: meetingURL()\n#Purpose: Returns a dictionary or URL's for each of the Beige books written in a given year.\n#Inputs: The year in which we are interested in doing analysis.\n#Output: The URL's to access the individual Beige books.\ndef meetingURL(year):\n root = yearURL(year)\n global meetingDict\n meetingDict = dict()\n #Scrape content from yearURL\n #Request URL\n year_response = requests.get(root, timeout=5)\n #Scrape year URL content\n year_content = BeautifulSoup(year_response.content, \"html.parser\")\n #Isolate meeting specific paths\n meeting = year_content.find('div', {'class' : 'col-xs-12 col-sm-8 col-md-8'})\n meeting_links = meeting.find_all('a')\n #Define regular expression\n re_link = re.compile('
    .*)\".*')\n #Delay web scrape\n time.sleep(1.5)\n #Loop through each month in which there was a meeting per year\n for x in range(0,len(meeting_links)):\n if(('www.federalreserve.gov' in str(meeting_links[x]) or '/monetarypolicy/beigebook' in str(meeting_links[x])) and '.htm' in str(meeting_links[x])):\n meetinglink = re_link.match(str(meeting_links[x])).groupdict()['nextlevel']\n if('www.federalreserve.gov/fomc' in str(meeting_links[x])):\n meetingDict[int(re.findall(r'\\d+', meetinglink)[1])] = [meetinglink]\n elif('www.federalreserve.gov' in str(meeting_links[x])):\n meetingDict[int(re.findall(r'\\d+', meetinglink)[0])] = [meetinglink]\n else:\n meetingDict[int(re.findall(r'\\d+', meetinglink)[0])] = [os.path.join(page_root, meetinglink.strip('/')).replace('\\\\', '/')] \n if(1995 < year < 2019):\n return meetingDict\n else: print(\"That data is not available. The archives are currently for 1996 - 2018\")\n\n\n#%%\n \n#Data Scraping: \n#Note: The sites have three different formats: (1) 1996 - 2010, (2) 2011 - 2016, & (3) 2017-2018 \n#Must utilize individual functions to handle these different formats. \n\n#Functions: scrapeOne(), scrapeTwo(), scrapeThree()\n#Purpose: Scrape text data from specific Beige books\n#Inputs: A year and meeting number (1-8), due to 8 meetings per year (only 2 in 1996)\n#Outputs: The text contents for that Beige book\n \n\n#1996 - 2010\ndef scrapeOne(year, meeting):\n global text\n #Call the function to return updated meetingDict for required year\n meetingURL(year)\n #Identify specific meeting URL\n textURL = meetingDict[list(meetingDict.keys())[meeting - 1]][0]\n textURL = textURL.replace('default', 'FullReport')\n #Request the URL\n text_response = requests.get(textURL, timeout = 5)\n #Scrape the contents\n text = BeautifulSoup(text_response.content, \"html.parser\")\n return text\n\n#2011 - 2016\ndef scrapeTwo(year, meeting):\n global text\n #Call the function to return updated meetingDict for required year\n meetingURL(year)\n #Identify specific meeting URL\n textURL = meetingDict[list(meetingDict.keys())[meeting - 1]][0]\n #Request the URL\n text_response = requests.get(textURL, timeout = 5)\n #Scrape the contents\n text_content = BeautifulSoup(text_response.content, \"html.parser\")\n #Full report\n text = text_content.find('div', {'id' : 'leftText'})\n return text\n\n\n#2017 - 2018\ndef scrapeThree(year, meeting):\n global text\n #Call the function to return updated meetingDict for required year\n meetingURL(year)\n #Identify specific meeting URL\n textURL = meetingDict[list(meetingDict.keys())[meeting - 1]][0]\n #Request the URL\n text_response = requests.get(textURL, timeout = 5)\n #Scrape the contents\n text_content = BeautifulSoup(text_response.content, \"html.parser\")\n #Full report\n text = text_content.find('div', {'id' : 'article'})\n return text\n\n\n#%%\n \n#Function: warning()\n#Purpose: Let's user know if their inputs are invalid\n#Inputs: year and FOMC meeting number for that year.\n#Outputs: A warning if the inputs are invalid given the data, else it passes.\ndef warning(year, meeting):\n if(meeting < 1 or meeting > 8):\n print(\"There are 8 FOMC meetings per year\")\n return True\n elif(year == 1996 and meeting > 2):\n print(\"We only have data for the last two meetings in 1996\")\n return True\n elif(year < 1996 or year > 2018):\n print(\"We don't have data for this year\")\n return True\n \n\n#Function: scrapeText()\n#Purpose: General purpose function combining all scraping techniques utilized in the previous functions\n#Inputs: 
Year and FOMC meeting number for that year\n#Outputs: The text contained in the specified book\ndef scrapeText(year, meeting):\n global text\n if(warning(year, meeting) != True):\n if(year < 2011):\n return scrapeOne(year, meeting)\n elif(year > 2016):\n return scrapeThree(year, meeting)\n else: return scrapeTwo(year, meeting)\n\n\n#%%\n#Scrape text data to scrapedDataList\ndata = []\nfor i in range(1996, 2019):\n for j in range(1, len(meetingURL(i))+1): \n print(i)\n print(j)\n x = scrapeText(i, j)\n y = {'year':i, 'meeting':j,'text':x}\n data.append(y.copy())\n\n\n#%%\n#Borrowed from: https://stackoverflow.com/questions/753052/strip-html-from-strings-in-python\n#Strips HTML tags from strings\nclass MLStripper(HTMLParser):\n def __init__(self):\n super().__init__()\n self.reset()\n self.strict = False\n self.convert_charrefs= True\n self.text = StringIO()\n def handle_data(self, d):\n self.text.write(d)\n def get_data(self):\n return self.text.getvalue()\n\ndef strip_tags(html):\n s = MLStripper()\n s.feed(html)\n return s.get_data()\n#%%\n \n\nfor index, data in enumerate(data):\n data[index]['text'] = strip_tags(str(data[index]['text'])).replace('\\r', '').replace('\\n', '')\n\n\nraw_text = os.path.join(savedDataFld, \"raw_text.json\").replace('\\\\', '/')\nwith open(raw_text, 'w') as f:\n json.dump(data, f)\n\n\n\n#%%\n","repo_name":"brianvandenakker/beige-book-classifier","sub_path":"PyCode/02_DataScrape.py","file_name":"02_DataScrape.py","file_ext":"py","file_size_in_byte":7718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18193368289","text":"N = int(input())\na = list(map(int, input().split()))\n\nXsum = 0\nfor i in a:\n\tXsum = Xsum ^ i\n\nans = list()\nfor i in a:\n\ts = Xsum ^ i\n#\tprint(s)\n\tans.append(str(s))\n\nprint(' '.join(ans))","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02631/s036676692.py","file_name":"s036676692.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"34385981997","text":"# coding=utf-8\nfrom __future__ import print_function\nimport functools\nimport vgg, pdb, time\nimport tensorflow as tf, numpy as np, os\nimport transform\nfrom utils import get_img\n\nSTYLE_LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')\nCONTENT_LAYER = 'relu4_2'\nDEVICES = '/cpu:0' #'CUDA_VISIBLE_DEVICES'\n\nos.putenv('MLU_VISIBLE_DEVICES','')\n\ndef loss_function(net, content_features, style_features, content_weight, style_weight, tv_weight, preds, batch_size):\n # 损失函数构建,net 为特征提取网络,content_features 为内容图像特征,style_features 为风格图像特征,content_weight、\n # style_weight 和 tv_weight 分别为特征重建损失、风格重建损失的权重和全变分正则化损失的权重\n\n batch_shape = (batch_size,256,256,3)\n\n # 计算内容损失\n # content_loss\n content_size = _tensor_size(content_features[CONTENT_LAYER])*batch_size\n assert _tensor_size(content_features[CONTENT_LAYER]) == _tensor_size(net[CONTENT_LAYER])\n content_loss = 2 * content_weight * tf.nn.l2_loss(net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) / content_size\n\n # 计算风格损失\n # style_loss\n style_losses = []\n for style_layer in STYLE_LAYERS:\n layer = net[style_layer]\n bs, height, width, filters = map(lambda i:i.value,layer.get_shape())\n size = height * width * filters\n feats = tf.reshape(layer, (bs, height * width, filters))\n feats_T = tf.transpose(feats, perm=[0,2,1])\n grams = tf.matmul(feats_T, feats) / size\n style_gram = style_features[style_layer]\n # TODO: 计算 style_losses\n 
style_losses.append(2 * tf.nn.l2_loss(grams - style_gram) / size)\n style_loss = style_weight * functools.reduce(tf.add, style_losses) / batch_size\n\n # 使用全变分正则化方法定义损失函数 tv_loss\n # tv_loss\n tv_y_size = _tensor_size(preds[:,1:,:,:])\n tv_x_size = _tensor_size(preds[:,:,1:,:])\n # TODO:将图像 preds 向水平和垂直方向各平移一个像素,分别与原图相减,分别计算二者的 𝐿2 范数 x_tv 和 y_tv\n # Hint: use tf.nn.l2_loss\n y_tv = tf.nn.l2_loss(preds[:, 1:, :, :] - preds[:, :preds.shape[1] - 1, :, :])\n x_tv = tf.nn.l2_loss(preds[:, :, 1:, :] - preds[:, :, :preds.shape[2] - 1, :])\n tv_loss = tv_weight*2*(x_tv/tv_x_size + y_tv/tv_y_size)/batch_size\n\n loss = content_loss + style_loss + tv_loss\n return content_loss, style_loss, tv_loss, loss\n\n \n \n#np arr, np arr\ndef optimize(content_targets, style_target, content_weight, style_weight,\n tv_weight, vgg_path, epochs=2, print_iterations=1000,\n batch_size=4, save_path='saver/fns.ckpt', slow=False,\n learning_rate=1e-3, debug=False, type=0, save=True, load=True):\n # 实时风格迁移训练方法定义,content_targets 为内容图像, style_target 为风格图像, content_weight、style_weight 和 tv_weight 分别为\n # 特征重建损失、风格重建损失和全变分正则化项的权重,vgg_path 为保存 VGG19 网络参数的文件路径\n if slow:\n batch_size = 1\n mod = len(content_targets) % batch_size\n if mod > 0:\n print(\"Train set has been trimmed slightly..\")\n content_targets = content_targets[:-mod] \n \n # 风格特征预处理\n style_features = {}\n\n batch_shape = (batch_size,256,256,3)\n style_shape = (1,) + style_target.shape\n print(style_shape)\n\n # precompute style features\n with tf.Graph().as_default(), tf.device('/cpu:0'), tf.Session() as sess:\n # 使用 numpy 库在 CPU 上处理\n # TODO:使用占位符来定义风格图像 style_image\n style_image = tf.placeholder(tf.float32, style_shape)\n\n #TODO: 依次调用 vgg.py 文件中的 preprocess()、net() 函数对风格图像进行预处理,并将此时得到的特征提取网络传递给 net\n net = vgg.net(vgg_path, vgg.preprocess(style_image))\n\n # 使用 numpy 库对风格图像进行预处理,定义风格图像的格拉姆矩阵\n style_pre = np.array([style_target])\n for layer in STYLE_LAYERS:\n features = net[layer].eval(feed_dict={style_image:style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n style_features[layer] = gram\n\n #TODO:先使用占位符来定义内容图像 X_content,再调用 preprocess() 函数对 X_content 进行预处理,生成 X_pre\n X_content = tf.placeholder(tf.float32, batch_shape)\n X_pre = vgg.preprocess(X_content)\n\n # 提取内容特征对应的网络层\n # precompute content features\n content_features = {}\n content_net = vgg.net(vgg_path, X_pre)\n content_features[CONTENT_LAYER] = content_net[CONTENT_LAYER]\n\n if slow:\n preds = tf.Variable(\n tf.random_normal(X_content.get_shape()) * 0.256\n )\n preds_pre = preds\n else:\n # TODO: 内容图像经过图像转换网络后输出结果 preds,并调用 preprocess() 函数对 preds 进行预处理, 生成 preds_pre\n preds = transform.net(X_content / 255., type)\n preds_pre = vgg.preprocess(preds)\n\n # TODO:preds_pre 输入到特征提取网络,并将此时得到的特征提取网络传递给 net\n net = vgg.net(vgg_path, preds_pre)\n\n # TODO:计算内容损失 content_loss, 风格损失 style_loss, 全变分正则化项 tv_loss, 损失函数 loss\n content_loss, style_loss, tv_loss, loss = loss_function(net, content_features, style_features, \\\n content_weight, style_weight, tv_weight, \\\n preds_pre, batch_size)\n\n # TODO:创建 Adam 优化器,并定义模型训练方法为最小化损失函数方法,返回 train_step\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n # TODO:初始化所有变量\n checkpoint_dir = './ckp_temp/fns.ckpt'\n if load:\n print('loading checkpoint')\n saver = tf.train.Saver()\n if os.path.isdir(checkpoint_dir):\n ckpt = tf.train.get_checkpoint_state(checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n else:\n 
raise Exception(\"No checkpoint found...\")\n else:\n saver.restore(sess, checkpoint_dir)\n else:\n sess.run(tf.global_variables_initializer())\n\n import random\n uid = random.randint(1, 100)\n print(\"UID: %s\" % uid)\n for epoch in range(epochs):\n num_examples = len(content_targets)\n iterations = 0\n while iterations * batch_size < num_examples:\n start_time = time.time()\n curr = iterations * batch_size\n step = curr + batch_size\n X_batch = np.zeros(batch_shape, dtype=np.float32)\n for j, img_p in enumerate(content_targets[curr:step]):\n X_batch[j] = get_img(img_p, (256,256,3)).astype(np.float32)\n\n iterations += 1\n assert X_batch.shape[0] == batch_size\n\n feed_dict = {\n X_content:X_batch\n }\n\n train_step.run(feed_dict=feed_dict)\n end_time = time.time()\n delta_time = end_time - start_time\n if debug:\n print(\"UID: %s, batch time: %s\" % (uid, delta_time))\n print('iteration: %d'%iterations)\n is_print_iter = int(iterations) % print_iterations == 0\n if slow:\n is_print_iter = epoch % print_iterations == 0\n is_last = epoch == epochs - 1 and iterations * batch_size >= num_examples\n should_print = is_print_iter\n if (iterations == 1 and epoch == 0):\n to_get = [style_loss, content_loss, tv_loss, loss, preds]\n test_feed_dict = {\n X_content:X_batch\n }\n\n tup = sess.run(to_get, feed_dict = test_feed_dict)\n _style_loss,_content_loss,_tv_loss,_loss,_preds = tup\n print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, iterations, _loss))\n to_print = (_style_loss, _content_loss, _tv_loss)\n print('style: %s, content:%s, tv: %s' % to_print)\n\n if should_print:\n to_get = [style_loss, content_loss, tv_loss, loss, preds]\n test_feed_dict = {\n X_content:X_batch\n }\n\n tup = sess.run(to_get, feed_dict = test_feed_dict)\n _style_loss,_content_loss,_tv_loss,_loss,_preds = tup\n losses = (_style_loss, _content_loss, _tv_loss,_loss)\n \n if slow:\n _preds = vgg.unprocess(_preds)\n elif save:\n # TODO:将模型参数保存到 save_path,并将训练的次数 save_id 作为后缀加入到模型名字中\n saver = tf.train.Saver()\n res = saver.save(sess, save_path)\n # 将相关计算结果返回\n yield(_preds, losses, iterations, epoch)\n\ndef _tensor_size(tensor):\n # 对张量进行切片操作,将 NHWC 格式的张量,切片成 HWC,再计算 H、W、C 的乘积\n # 其实就是返回 H * W * C, 利用mul进行reduce\n from operator import mul\n return functools.reduce(mul, (d.value for d in tensor.get_shape()[1:]), 1)\n","repo_name":"ysj1173886760/Learning","sub_path":"ai-system/exp_4_3/src/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":9937,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"90"} +{"seq_id":"8473332966","text":"import sys\r\n\r\nlr = sys.stdin.readline\r\n\r\nnum = int(lr())\r\n\r\nfor _ in range(num):\r\n a = lr().split(' ')\r\n count = 0\r\n result = 0\r\n for i in range(1,len(a)):\r\n result += int(a[i])\r\n avr = result/int(a[0])\r\n for i in range(1, len(a)):\r\n if int(a[i])>avr:\r\n count += 1\r\n print('{:.3f}%'.format((count / int(a[0]) * 100)))","repo_name":"ji-hun-choi/Baekjoon","sub_path":"04.1차원_배열/04344.py","file_name":"04344.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"33537257404","text":"import copy\nimport datetime as dt\nimport locale\nimport logging\nfrom itertools import chain\nfrom typing import Iterable, Optional, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom gs_quant.api.gs.assets import GsAssetApi\nfrom gs_quant.api.gs.portfolios import GsPortfolioApi\nfrom gs_quant.context_base import 
nullcontext\nfrom gs_quant.instrument import Instrument\nfrom gs_quant.markets import HistoricalPricingContext, PricingContext\nfrom gs_quant.priceable import PriceableImpl\nfrom gs_quant.risk import RiskMeasure\nfrom gs_quant.risk.results import CompositeResultFuture, PortfolioRiskResult, PortfolioPath, PricingFuture\nfrom gs_quant.target.portfolios import Position, PositionSet\n\n_logger = logging.getLogger(__name__)\n\n\nclass Portfolio(PriceableImpl):\n \"\"\"A collection of instruments\n\n Portfolio holds a collection of instruments in order to run pricing and risk scenarios\n\n \"\"\"\n\n def __init__(self,\n priceables: Optional[Union[PriceableImpl, Iterable[PriceableImpl], dict]] = (),\n name: Optional[str] = None):\n \"\"\"\n Creates a portfolio object which can be used to hold instruments\n\n :param priceables: constructed with an instrument, portfolio, iterable of either, or a dictionary where\n key is name and value is a priceable\n \"\"\"\n super().__init__()\n if isinstance(priceables, dict):\n priceables_list = []\n for name, priceable in priceables.items():\n priceable.name = name\n priceables_list.append(priceable)\n self.priceables = priceables_list\n else:\n self.priceables = priceables\n\n self.__name = name\n self.__id = None\n\n def __getitem__(self, item):\n if isinstance(item, (int, slice)):\n return self.__priceables[item]\n elif isinstance(item, PortfolioPath):\n return item(self, rename_to_parent=True)\n else:\n values = tuple(self[p] for p in self.paths(item))\n return values[0] if len(values) == 1 else values\n\n def __contains__(self, item):\n if isinstance(item, PriceableImpl):\n return any(item in p.__priceables_lookup for p in self.all_portfolios + (self,))\n elif isinstance(item, str):\n return any(item in p.__priceables_by_name for p in self.all_portfolios + (self,))\n else:\n return False\n\n def __len__(self):\n return len(self.__priceables)\n\n def __iter__(self):\n return iter(self.__priceables)\n\n @property\n def __pricing_context(self) -> PricingContext:\n return PricingContext.current if not PricingContext.current.is_entered else nullcontext()\n\n @property\n def id(self) -> str:\n return self.__id\n\n @property\n def name(self) -> str:\n return self.__name\n\n @property\n def priceables(self) -> Tuple[PriceableImpl, ...]:\n return self.__priceables\n\n @priceables.setter\n def priceables(self, priceables: Union[PriceableImpl, Iterable[PriceableImpl]]):\n self.__priceables = (priceables,) if isinstance(priceables, PriceableImpl) else tuple(priceables)\n self.__priceables_lookup = {}\n self.__priceables_by_name = {}\n\n for idx, i in enumerate(self.__priceables):\n self.__priceables_lookup.setdefault(copy.copy(i), []).append(idx)\n if i and i.name:\n self.__priceables_by_name.setdefault(i.name, []).append(idx)\n\n @priceables.deleter\n def priceables(self):\n self.__priceables = None\n self.__priceables_lookup = None\n self.__priceables_by_name = None\n\n @property\n def instruments(self) -> Tuple[Instrument, ...]:\n return tuple(set(i for i in self.__priceables if isinstance(i, Instrument)))\n\n @property\n def all_instruments(self) -> Tuple[Instrument, ...]:\n return tuple(set(chain(self.instruments, chain.from_iterable(p.instruments for p in self.all_portfolios))))\n\n @property\n def portfolios(self) -> Tuple[PriceableImpl, ...]:\n return tuple(i for i in self.__priceables if isinstance(i, Portfolio))\n\n @property\n def all_portfolios(self) -> Tuple[PriceableImpl, ...]:\n stack = list(self.portfolios)\n portfolios = set(stack)\n\n while 
stack:\n portfolio = stack.pop()\n if portfolio in portfolios:\n continue\n\n sub_portfolios = portfolio.portfolios\n portfolios.update(sub_portfolios)\n stack.extend(sub_portfolios)\n\n return tuple(portfolios)\n\n def subset(self, paths: Iterable[PortfolioPath], name=None):\n return Portfolio(tuple(self[p] for p in paths), name=name)\n\n @staticmethod\n def __from_internal_positions(id_type: str, positions_id):\n instruments = GsPortfolioApi.get_instruments_by_position_type(id_type, positions_id)\n return Portfolio(instruments, name=positions_id)\n\n @staticmethod\n def from_eti(eti: str):\n return Portfolio.__from_internal_positions('ETI', eti.replace(',', '%2C'))\n\n @staticmethod\n def from_book(book: str, book_type: str = 'risk'):\n return Portfolio.__from_internal_positions(book_type, book)\n\n @staticmethod\n def from_asset_id(asset_id: str, date=None):\n asset = GsAssetApi.get_asset(asset_id)\n response = GsAssetApi.get_asset_positions_for_date(asset_id, date) if date else \\\n GsAssetApi.get_latest_positions(asset_id)\n response = response[0] if isinstance(response, tuple) else response\n positions = response.positions if isinstance(response, PositionSet) else response['positions']\n instruments = GsAssetApi.get_instruments_for_positions(positions)\n ret = Portfolio(instruments, name=asset.name)\n ret.__id = asset_id\n return ret\n\n @staticmethod\n def from_asset_name(name: str):\n asset = GsAssetApi.get_asset_by_name(name)\n return Portfolio.load_from_portfolio_id(asset.id)\n\n @staticmethod\n def from_portfolio_id(portfolio_id: str, date=None):\n portfolio = GsPortfolioApi.get_portfolio(portfolio_id)\n response = GsPortfolioApi.get_positions_for_date(portfolio_id, date) if date else\\\n GsPortfolioApi.get_latest_positions(portfolio_id)\n response = response[0] if isinstance(response, tuple) else response\n positions = response.positions if isinstance(response, PositionSet) else response['positions']\n instruments = GsAssetApi.get_instruments_for_positions(positions)\n ret = Portfolio(instruments, name=portfolio.name)\n ret.__id = portfolio_id\n return ret\n\n @staticmethod\n def from_portfolio_name(name: str):\n portfolio = GsPortfolioApi.get_portfolio_by_name(name)\n return Portfolio.load_from_portfolio_id(portfolio.id)\n\n def save(self, overwrite: Optional[bool] = False):\n if self.portfolios:\n raise ValueError('Cannot save portfolios with nested portfolios')\n\n if self.__id:\n if not overwrite:\n raise ValueError(f'Portfolio with id {id} already exists. Use overwrite=True to overwrite')\n else:\n if not self.__name:\n raise ValueError('name not set')\n\n try:\n self.__id = GsPortfolioApi.get_portfolio_by_name(self.__name).id\n if not overwrite:\n raise RuntimeError(\n f'Portfolio {self.__name} with id {self.__id} already exists. 
Use overwrite=True to overwrite')\n except ValueError:\n from gs_quant.target.portfolios import Portfolio as MarqueePortfolio\n self.__id = GsPortfolioApi.create_portfolio(MarqueePortfolio('USD', self.__name)).id\n _logger.info(f'Created Marquee portfolio {self.__name} with id {self.__id}')\n\n position_set = PositionSet(\n position_date=dt.date.today(),\n positions=tuple(Position(asset_id=GsAssetApi.get_or_create_asset_from_instrument(i))\n for i in self.instruments))\n\n GsPortfolioApi.update_positions(self.__id, (position_set,))\n\n @classmethod\n def from_frame(\n cls,\n data: pd.DataFrame,\n mappings: dict = {},\n date_formats: list = None,\n ):\n trade_list = []\n attribute_map = {}\n\n data = data.replace({np.nan: None})\n\n for index, row in data.iterrows():\n if is_empty(row):\n continue\n try:\n instrument_type = get_value(row, mappings, 'type')\n asset_class = get_value(row, mappings, 'asset_class')\n except ValueError:\n pass\n\n if 'tdapi' in str(instrument_type):\n inputs = {'$type': instrument_type[6:]}\n [instrument, attributes] = get_instrument(instrument_type[6:], instr_map=attribute_map, tdapi=True)\n else:\n inputs = {'asset_class': asset_class, 'type': instrument_type}\n [instrument, attributes] = get_instrument(instrument_type,\n instr_class=asset_class,\n instr_map=attribute_map)\n\n for attribute in attributes:\n if attribute == 'type' or attribute == 'asset_class':\n continue\n\n additional = []\n prop_type = instrument.prop_type(attribute, additional)\n additional.append(prop_type)\n\n if prop_type is dt.date:\n value = get_date(row, mappings, attribute, date_formats)\n else:\n value = get_value(row, mappings, attribute)\n if value is not None and type(value) not in (float, int):\n if float in additional:\n value = string_to_float(value)\n\n if value is not None:\n if type(value) is str:\n value.strip(' ')\n inputs[attribute] = value\n\n trade = Instrument.from_dict(inputs)\n trade_list.append(trade)\n\n return cls(trade_list)\n\n @classmethod\n def from_csv(\n cls,\n csv_file: str,\n mappings: dict = {},\n date_formats: list = None,\n ):\n data = pd.read_csv(csv_file, skip_blank_lines=True).replace({np.nan: None})\n return cls.from_frame(data, mappings, date_formats)\n\n def append(self, priceables: Union[PriceableImpl, Iterable[PriceableImpl]]):\n self.priceables += ((priceables,) if isinstance(priceables, PriceableImpl) else tuple(priceables))\n\n def pop(self, item) -> PriceableImpl:\n priceable = self[item]\n self.priceables = [inst for inst in self.instruments if inst != priceable]\n return priceable\n\n def to_frame(self, mappings: dict = {}) -> pd.DataFrame:\n def to_records(portfolio: Portfolio) -> list:\n records = []\n\n for priceable in portfolio.priceables:\n if isinstance(priceable, Portfolio):\n records.extend(to_records(priceable))\n else:\n records.append(dict(chain(priceable.as_dict().items(),\n (('instrument', priceable), ('portfolio', portfolio.name)))))\n\n return records\n\n df = pd.DataFrame.from_records(to_records(self)).set_index(['portfolio', 'instrument'])\n all_columns = df.columns.to_list()\n columns = sorted(c for c in all_columns if c not in ('asset_class', 'type'))\n if 'asset_class' in all_columns:\n columns = ['asset_class', 'type'] + columns\n\n df = df[columns]\n\n for key, value in mappings.items():\n if isinstance(value, str):\n df[key] = df[value]\n elif callable(value):\n df[key] = len(df) * [None]\n df[key] = df.apply(value, axis=1)\n\n return df\n\n def to_csv(self, csv_file: str, mappings: dict = {}, ignored_cols: list = 
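The `from_frame` method defined below accepts a `mappings` dict whose values may be either a source column name or a callable applied row by row. A small sketch of that convention; the frame layout and column names are invented, and the final call is left commented because instrument construction depends on gs-quant internals:

```python
import pandas as pd

# One row per trade.
frame = pd.DataFrame({
    'type': ['Swap'],
    'asset_class': ['Rates'],
    'ccy': ['USD'],
})

mappings = {
    'notional_currency': 'ccy',                 # str value: copy another column
    'notional_amount': lambda row: 10_000_000,  # callable: computed per row
}

# portfolio = Portfolio.from_frame(frame, mappings)
```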
[]):\n port_df = self.to_frame(mappings)\n port_df = port_df[np.setdiff1d(port_df.columns, ignored_cols)]\n port_df.reset_index(drop=True, inplace=True)\n\n port_df.to_csv(csv_file)\n\n @property\n def all_paths(self) -> Tuple[PortfolioPath, ...]:\n paths = ()\n stack = [(None, self)]\n while stack:\n parent, portfolio = stack.pop()\n\n for idx, priceable in enumerate(portfolio.__priceables):\n path = parent + PortfolioPath(idx) if parent is not None else PortfolioPath(idx)\n if isinstance(priceable, Portfolio):\n stack.append((path, priceable))\n else:\n paths += (path,)\n\n return paths\n\n def paths(self, key: Union[str, PriceableImpl]) -> Tuple[PortfolioPath, ...]:\n if not isinstance(key, (str, Instrument, Portfolio)):\n raise ValueError('key must be a name or Instrument or Portfolio')\n\n idx = self.__priceables_by_name.get(key) if isinstance(key, str) else self.__priceables_lookup.get(key)\n paths = tuple(PortfolioPath(i) for i in idx) if idx else ()\n\n for path, porfolio in ((PortfolioPath(i), p)\n for i, p in enumerate(self.__priceables) if isinstance(p, Portfolio)):\n paths += tuple(path + sub_path for sub_path in porfolio.paths(key))\n\n return paths\n\n def resolve(self, in_place: bool = True) -> Optional[Union[PricingFuture, PriceableImpl, dict]]:\n pricing_context = self.__pricing_context\n with pricing_context:\n futures = [i.resolve(in_place) for i in self.__priceables]\n\n if not in_place:\n ret = {} if isinstance(PricingContext.current, HistoricalPricingContext) else Portfolio(name=self.name)\n result_future = PricingFuture() if not isinstance(pricing_context, PricingContext) \\\n or pricing_context.is_async or pricing_context.is_entered else None\n\n def cb(future):\n if isinstance(ret, Portfolio):\n ret.priceables = [f.result() for f in future.futures]\n else:\n priceables_by_date = {}\n for future in futures:\n for date, priceable in future.result().items():\n priceables_by_date.setdefault(date, []).append(priceable)\n\n for date, priceables in priceables_by_date.items():\n ret[date] = Portfolio(priceables, name=self.name)\n\n if result_future:\n result_future.set_result(ret)\n\n CompositeResultFuture(futures).add_done_callback(cb)\n return result_future or ret\n\n def calc(self, risk_measure: Union[RiskMeasure, Iterable[RiskMeasure]], fn=None) -> PortfolioRiskResult:\n with self.__pricing_context:\n return PortfolioRiskResult(self,\n (risk_measure,) if isinstance(risk_measure, RiskMeasure) else risk_measure,\n [p.calc(risk_measure, fn=fn) for p in self.__priceables])\n\n\ndef np_to_fundamental_type(value):\n if isinstance(value, np.generic):\n return value.item()\n return value\n\n\ndef from_excel_date(ordinal, _epoch0=dt.datetime(1899, 12, 31)):\n if isinstance(ordinal, float):\n if ordinal > 59:\n ordinal -= 1 # Excel leap year bug, 1900 is not a leap year!\n return (_epoch0 + dt.timedelta(days=ordinal)).replace(microsecond=0).date()\n\n return ordinal\n\n\ndef get_value(row, mappings, attribute):\n if attribute in mappings.keys():\n if isinstance(mappings[attribute], str):\n return np_to_fundamental_type(row[mappings[attribute]])\n return mappings[attribute](row)\n elif attribute in row.index:\n return row[attribute]\n\n\nvalid_date_formats = ['%Y-%m-%d', # '2020-07-28'\n '%d%b%y', # '28Jul20'\n '%d-%b-%y', # '28-Jul-20'\n '%d/%m/%Y'] # '28/07/2020\n\n\ndef get_date(row, mappings, attribute, date_formats: list = None):\n if date_formats is None:\n date_formats = valid_date_formats\n else:\n date_formats.append(valid_date_formats)\n\n value = get_value(row, 
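A quick self-check of the Excel-ordinal conversion defined in this module (restated here so it runs standalone): Excel day 1 is 1900-01-01, and ordinals past 59 are shifted down by one because Excel wrongly treats 1900 as a leap year.

```python
import datetime as dt

def from_excel_date(ordinal, _epoch0=dt.datetime(1899, 12, 31)):
    if isinstance(ordinal, float):
        if ordinal > 59:
            ordinal -= 1  # compensate for Excel's fictitious 1900-02-29
        return (_epoch0 + dt.timedelta(days=ordinal)).replace(microsecond=0).date()
    return ordinal

assert from_excel_date(1.0) == dt.date(1900, 1, 1)
assert from_excel_date(44044.0) == dt.date(2020, 8, 1)
```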
mappings, attribute)\n if isinstance(value, (dt.datetime, dt.date)):\n return value\n\n ordinal = from_excel_date(value)\n r = None\n if ordinal is not None:\n for f in date_formats:\n try:\n r = dt.datetime.strptime(ordinal, f).date()\n except ValueError:\n pass\n if r is not None:\n return r\n\n return ordinal\n\n\ndef string_to_float(string):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')\n try:\n return locale.atof(string)\n except ValueError:\n pass\n try:\n return float(string.strip('%')) / 100\n except ValueError:\n pass\n\n return string\n\n\ndef is_empty(serie):\n for elem in serie.values:\n if elem is not None:\n return False\n return True\n\n\ndef get_instrument(instr_type, instr_class=None, instr_map=None, tdapi=False):\n if instr_map is None:\n instr_map = {}\n if tdapi:\n if instr_type in instr_map:\n instr = instr_map[instr_type][0]\n attri = instr_map[instr_type][1]\n else:\n instr = Instrument().from_dict({'$type': instr_type})\n attri = instr.properties()\n instr_map[instr_type] = (instr, attri)\n else:\n if (instr_class, instr_type) in instr_map:\n instr = instr_map[(instr_class, instr_type)][0]\n attri = instr_map[(instr_class, instr_type)][1]\n else:\n instr = Instrument().from_dict({'asset_class': instr_class, 'type': instr_type})\n attri = instr.properties()\n instr_map[(instr_class, instr_type)] = (instr, attri)\n return [instr, attri]\n","repo_name":"Harishangaran/gs-quant","sub_path":"gs_quant/markets/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":18092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"90"} +{"seq_id":"26579701524","text":"import gzip\r\nfrom collections import defaultdict\r\nimport random\r\nimport math\r\n\r\ndef readGz(f):\r\n for l in gzip.open(f):\r\n yield eval(l)\r\n\r\ndef getMSE(a,b1,b2,train_set):\r\n sum = 0\r\n for u,b in train_set:\r\n sum += math.pow(a + b1[u] + b2[b] - train_set[(u,b)], 2)\r\n MSE = 1/100000*sum\r\n return MSE\r\n\r\nallRatings = []\r\ntrainRatings = {}\r\nvalidationRatings = {}\r\ntrain_users = defaultdict(list)\r\ntrain_items = defaultdict(list)\r\ntotal = 0\r\nfor l in readGz(\"train.json.gz\"):\r\n user,business = l['reviewerID'],l['itemID']\r\n if total < 100000:\r\n train_users[user].append(business)\r\n train_items[business].append(user)\r\n trainRatings[(user,business)] = l['rating']\r\n allRatings.append(l['rating'])\r\n else:\r\n validationRatings[(user,business)] = l['rating']\r\n total += 1\r\n\r\n########################init##########################\r\nalpha = sum(allRatings)/100000\r\nnew_alpha = 0\r\nbetta_u = {}\r\nnew_betta_u = {}\r\nbetta_i = {}\r\nnew_betta_i = {}\r\nlamda = 1\r\nfor p in train_users:\r\n betta_u[p] = 0\r\nfor q in train_items:\r\n betta_i[q] = 0\r\nMSE = 0\r\nnewMSE = 0\r\n\r\nwhile True:\r\n MSE = getMSE(alpha, betta_u, betta_i, trainRatings)\r\n temp = 0\r\n for u,b in trainRatings:\r\n temp += trainRatings[(u,b)] - betta_u[u] - betta_i[b]\r\n new_alpha = temp / 100000\r\n for u in betta_u:\r\n temp = 0\r\n for b in train_users[u]:\r\n temp += trainRatings[(u,b)] - (alpha + betta_i[b])\r\n print(u,temp)\r\n new_betta_u[u] = temp/(lamda + len(train_users[u]))\r\n\r\n for b in betta_i:\r\n temp = 0\r\n for u in train_items[b]:\r\n temp += trainRatings[(u,b)] - (alpha + betta_u[u])\r\n new_betta_i[b] = temp/(lamda + len(train_items[b]))\r\n newMSE = getMSE(new_alpha, new_betta_u, new_betta_i, trainRatings)\r\n print(MSE,newMSE)\r\n if abs(MSE - newMSE) < 0.0001:\r\n break\r\n else:\r\n alpha = new_alpha\r\n betta_u 
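The rating-prediction script above iterates a bias-only rating model, r_ui ≈ alpha + b_u + b_i, with an L2 penalty on the biases. A cleaner sketch of one round of its coordinate-descent updates (the closed-form updates are standard; the data layout below — a ratings dict plus per-user and per-item adjacency dicts — mirrors the script's `trainRatings`, `train_users` and `train_items`):

```python
def update_biases(ratings, users, items, alpha, b_u, b_i, lam):
    # alpha: average residual after removing both biases
    alpha = sum(r - b_u[u] - b_i[i] for (u, i), r in ratings.items()) / len(ratings)
    # each user bias: average residual over that user's ratings, shrunk by lam
    for u, rated in users.items():
        b_u[u] = sum(ratings[(u, i)] - alpha - b_i[i] for i in rated) / (lam + len(rated))
    # each item bias: the symmetric update over that item's raters
    for i, raters in items.items():
        b_i[i] = sum(ratings[(u, i)] - alpha - b_u[u] for u in raters) / (lam + len(raters))
    return alpha, b_u, b_i
```

Iterating this function until the training MSE stops improving reproduces the fixed point the script's `while True` loop searches for.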
= new_betta_u\r\n betta_i = new_betta_i\r\n\r\n # ##############################fix alpha and betta_i with your updated betta_u##################################\r\n # MSE = getMSE(alpha, betta_u, betta_i, trainRatings)\r\n # for u in betta_u:\r\n # temp = 0\r\n # for b in train_users[u]:\r\n # temp += trainRatings[(u, b)] - (alpha + betta_i[b])\r\n # new_betta_u[u] = temp / (lamda + len(train_users[u]))\r\n # temp = 0\r\n # for u, b in trainRatings:\r\n # temp += trainRatings[(u, b)] - new_betta_u[u] - betta_i[b]\r\n # new_alpha = temp / 100000\r\n # for b in betta_i:\r\n # temp = 0\r\n # for u in train_items[b]:\r\n # temp += trainRatings[(u, b)] - (alpha + new_betta_u[u])\r\n # new_betta_i[b] = temp / (lamda + len(train_items[b]))\r\n #\r\n # newMSE = getMSE(new_alpha, new_betta_u, new_betta_i, trainRatings)\r\n # print(MSE, newMSE)\r\n # if MSE - newMSE < 0.0000001:\r\n # break\r\n # else:\r\n # alpha = new_alpha\r\n # betta_u = new_betta_u\r\n # betta_i = new_betta_i\r\n # ##############################fix alpha and betta_u in your updated betta_i##################################\r\n # MSE = getMSE(alpha, betta_u, betta_i, trainRatings)\r\n # for b in betta_i:\r\n # temp = 0\r\n # for u in train_items[b]:\r\n # temp += trainRatings[(u, b)] - (alpha + betta_u[u])\r\n # new_betta_i[b] = temp / (lamda + len(train_items[b]))\r\n # for u in betta_u:\r\n # temp = 0\r\n # for b in train_users[u]:\r\n # temp += trainRatings[(u, b)] - (alpha + new_betta_i[b])\r\n # new_betta_u[u] = temp / (lamda + len(train_users[u]))\r\n # temp = 0\r\n # for u, b in trainRatings:\r\n # temp += trainRatings[(u, b)] - betta_u[u] - new_betta_i[b]\r\n # new_alpha = temp / 100000\r\n\r\n\r\n # newMSE = getMSE(new_alpha, new_betta_u, new_betta_i, trainRatings)\r\n # print(MSE, newMSE)\r\n # if MSE - newMSE < 0.0000001:\r\n # break\r\n # else:\r\n # alpha = new_alpha\r\n # betta_u = new_betta_u\r\n # betta_i = new_betta_i\r\n\r\n# alpha = new_alpha\r\n# betta_u = new_betta_u\r\n# betta_i = new_betta_i\r\n\r\n\r\nfor u,b in validationRatings:\r\n if u not in train_users:\r\n betta_u[u] = 0\r\n if b not in train_items:\r\n betta_i[b] = 0\r\nMSE_validation = getMSE(alpha,betta_u,betta_i,validationRatings)\r\nprint(MSE_validation)\r\n","repo_name":"zyk19960912/Recommender-System","sub_path":"rating_prediction.py","file_name":"rating_prediction.py","file_ext":"py","file_size_in_byte":4342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"72849468457","text":"\"\"\"\nLinkage Tree.\n\n:class:`LinkageTree` is the internal representation of dendrograms.\n\n:class:`LinkageNode` and :class:`LeafNode` are the building blocks of\n:class:`LinkageTree`. 
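Background sketch for the linkage-tree module that follows: the SciPy linkage matrix it wraps has one row per merge step, encoding (left child index, right child index, merge distance, descendant leaf count).

```python
import numpy as np
from scipy.cluster.hierarchy import linkage

# Four 1-D observations produce three merge rows.
points = np.array([[0.0], [0.1], [5.0], [5.2]])
Z = linkage(points, method='single')
print(Z.shape)  # (3, 4): n - 1 merges for n leaves
```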
Both these classes inherit from :class:`BaseNode`.\n\"\"\"\nfrom __future__ import annotations\n\nfrom copy import copy\nfrom typing import Any, Iterable, Iterator, List, Optional, Sequence, Tuple\n\nimport numpy as np\nimport numpy.typing as npt\n\nfrom ..api import AllTracker, inheritdoc\nfrom ..expression import Expression, HasExpressionRepr\nfrom ..expression.atomic import Id\nfrom .linkage import LeafNode, LinkageNode, Node\n\n#\n# Exported names\n#\n\n__all__ = [\"LinkageTree\"]\n\n\n#\n# Type variables\n#\n\nLinkageMatrix = npt.NDArray[np.float_]\n\n\n#\n# Constants\n#\n\nASSERTION_CURRENT_NODE_NOT_A_LEAF = \"current node must not be a leaf\"\n\n\n#\n# Ensure all symbols introduced below are included in __all__\n#\n\n__tracker = AllTracker(globals())\n\n\n#\n# Class definitions\n#\n\n\n@inheritdoc(match=\"[see superclass]\")\nclass LinkageTree(HasExpressionRepr):\n \"\"\"\n A traversable tree derived from a SciPy linkage matrix.\n\n Supports :func:`len`, numerical indexing, and node iteration.\n \"\"\"\n\n __F_CHILD_LEFT = 0\n __F_CHILD_RIGHT = 1\n __F_CHILDREN_DISTANCE = 2\n\n #: The original linkage matrix created by :func:`scipy.cluster.hierarchy.linkage`.\n #:\n #: One row of the scipy linkage matrix is a quadruple:\n #: `(,\n #: ,\n #: ,\n #: )`,\n #: where the descendant nodes include the nodes from the entire sub-tree,\n #: from direct children down to leaf nodes.\n scipy_linkage_matrix: LinkageMatrix\n\n #: The maximum possible distance in the linkage tree; this determines the height of\n #: the tree to be drawn.\n max_distance: float\n\n #: A label describing the type/unit of distances\n #: passed in arg `scipy_linkage_matrix` (optional).\n distance_label: Optional[str]\n\n #: A label describing the type of names\n #: passed in arg `leaf_names` (optional).\n leaf_label: Optional[str]\n\n #: A label describing the type/unit of weights\n #: passed in arg `leaf_weights` (optional).\n weight_label: Optional[str]\n\n def __init__(\n self,\n *,\n scipy_linkage_matrix: LinkageMatrix,\n leaf_names: Iterable[str],\n leaf_weights: Iterable[float],\n max_distance: Optional[float] = None,\n distance_label: Optional[str] = None,\n leaf_label: Optional[str] = None,\n weight_label: Optional[str] = None,\n ) -> None:\n \"\"\"\n :param scipy_linkage_matrix: linkage matrix calculated by function\n :func:`scipy.cluster.hierarchy.linkage`\n :param leaf_names: labels of the leaves\n :param leaf_weights: weight of the leaves; all values must range between `0.0`\n and `1.0`, and should add up to `1.0`\n :param max_distance: maximum theoretical distance value; this must be equal\n to, or greater than the maximum distance in arg `scipy_linkage_matrix`\n (optional)\n :param distance_label: a label describing the type and/or unit of distances\n passed in arg `scipy_linkage_matrix` (optional)\n :param leaf_label: a label describing the type of names\n passed in arg `leaf_names` (optional)\n :param weight_label: a label describing the type and/or unit of weights\n passed in arg `leaf_weights` (optional)\n \"\"\"\n\n n_branches = len(scipy_linkage_matrix)\n n_leaves = n_branches + 1\n\n def _validate_leaves(var: Sequence[Any], var_name: str) -> None:\n if len(var) != n_leaves:\n raise ValueError(f\"expected {n_leaves} values for arg {var_name}\")\n\n self.scipy_linkage_matrix = scipy_linkage_matrix\n\n leaf_names = [str(name) for name in leaf_names]\n leaf_weights = [float(weight) for weight in leaf_weights]\n\n _validate_leaves(leaf_names, \"leaf_labels\")\n _validate_leaves(leaf_weights, 
\"leaf_weights\")\n\n if any(not (0.0 <= weight <= 1.0) for weight in leaf_weights):\n raise ValueError(\n \"all values in arg leaf_weights are required to be in the range \"\n \"from 0.0 to 1.0\"\n )\n\n self._nodes: List[Node] = [\n *[\n LeafNode(index=index, name=label, weight=weight)\n for index, (label, weight) in enumerate(zip(leaf_names, leaf_weights))\n ],\n *[\n LinkageNode(\n index=index + n_leaves,\n children_distance=scipy_linkage_matrix[index][\n LinkageTree.__F_CHILDREN_DISTANCE\n ],\n )\n for index in range(n_branches)\n ],\n ]\n\n root_children_distance = self._nodes[-1].children_distance\n if max_distance is None:\n max_distance = root_children_distance\n elif max_distance < root_children_distance:\n raise ValueError(\n f\"arg max_distance={max_distance} must be equal to or greater than \"\n f\"the maximum distance (= {root_children_distance}) in the linkage tree\"\n )\n\n self.max_distance = max_distance\n self.leaf_label = leaf_label\n self.weight_label = weight_label\n self.distance_label = distance_label\n\n @property\n def root(self) -> Node:\n \"\"\"\n The root node of the linkage tree.\n \"\"\"\n return self._nodes[-1]\n\n def children(self, node: Node) -> Optional[Tuple[Node, Node]]:\n \"\"\"\n Get the children of the given node.\n\n :param node: the node for which to get the children\n :return: ``None`` if the node is a leaf, otherwise the pair of children\n \"\"\"\n\n node_index = node.index\n nodes = self._nodes\n\n # check that the node is included in this tree\n if node_index >= len(nodes) or node is not nodes[node_index]:\n raise ValueError(\"arg node is not a node in this linkage tree\")\n\n if node.is_leaf:\n return None\n else:\n # noinspection PyProtectedMember\n node_linkage = self.scipy_linkage_matrix[node_index - self.n_leaves]\n ix_c1, ix_c2 = node_linkage[\n [LinkageTree.__F_CHILD_LEFT, LinkageTree.__F_CHILD_RIGHT]\n ].astype(int)\n return nodes[ix_c1], nodes[ix_c2]\n\n @property\n def n_leaves(self) -> int:\n \"\"\"\n The number of leave nodes in this linkage tree.\n \"\"\"\n return len(self) - len(self.scipy_linkage_matrix)\n\n def sort_by_weight(self) -> LinkageTree:\n \"\"\"\n Create a copy of this linkage trees, switching the left and right nodes of\n branches such that the mean leaf weight or any left node is always greater\n than the mean leaf weight in the right node.\n\n :return: a copy of this linkage tree with sorting applied\n \"\"\"\n\n linkage: LinkageMatrix = self.scipy_linkage_matrix.copy()\n\n def _sort_node(n: Node) -> Tuple[float, int]:\n # sort a linkage node and return its total weight and leaf count\n\n if n.is_leaf:\n return n.weight, 1\n\n children = self.children(n)\n assert children is not None, ASSERTION_CURRENT_NODE_NOT_A_LEAF\n l, r = children\n\n weight_left, leaves_left = _sort_node(l)\n weight_right, leaves_right = _sort_node(r)\n\n if weight_left / leaves_left < weight_right / leaves_right:\n # swap nodes if the right node has the higher weight\n n_linkage = linkage[n.index - self.n_leaves]\n n_linkage[\n [LinkageTree.__F_CHILD_RIGHT, LinkageTree.__F_CHILD_LEFT]\n ] = n_linkage[[LinkageTree.__F_CHILD_LEFT, LinkageTree.__F_CHILD_RIGHT]]\n\n return weight_left + weight_right, leaves_left + leaves_right\n\n _sort_node(self.root)\n\n linkage_sorted = copy(self)\n linkage_sorted.scipy_linkage_matrix = linkage\n return linkage_sorted\n\n def iter_nodes(self, inner: bool = True) -> Iterator[Node]:\n \"\"\"\n Traverse this linkage tree depth-first and return all nodes.\n\n :param inner: if ``True``, iterate inner nodes; if 
``False``, iterate\n leaf nodes only\n :return: an iterator for all nodes\n \"\"\"\n\n def _iter(n: Node) -> Iterator[Node]:\n if n.is_leaf:\n yield n\n else:\n if inner:\n yield n\n children = self.children(n)\n assert children is not None, ASSERTION_CURRENT_NODE_NOT_A_LEAF\n l, r = children\n yield from _iter(l)\n yield from _iter(r)\n\n yield from _iter(self.root)\n\n def __len__(self) -> int:\n return len(self._nodes)\n\n def __getitem__(self, item: int) -> Node:\n return self._nodes[item]\n\n def to_expression(self) -> Expression:\n \"\"\"[see superclass]\"\"\"\n\n def _expr(n: Node) -> Expression:\n if n.is_leaf:\n return n.to_expression()\n else:\n children = self.children(n)\n assert children is not None, ASSERTION_CURRENT_NODE_NOT_A_LEAF\n l, r = children\n return n.to_expression()[_expr(l), _expr(r)]\n\n return Id(type(self))(\n _expr(self.root),\n max_distance=self.max_distance,\n leaf_label=self.leaf_label,\n weight_label=self.weight_label,\n distance_label=self.distance_label,\n )\n\n\n__tracker.validate()\n","repo_name":"BCG-Gamma/pytools","sub_path":"src/pytools/data/_linkage.py","file_name":"_linkage.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"90"} +{"seq_id":"18288268799","text":"N,M = map(int,input().split())\n\njudge = [False]*N\nWAcnt = [0] * N\nACcnt = 0\nfor i in range(M):\n p,S = (x for x in input().split())\n p = int(p)\n if judge[p-1] == False:\n if S == \"AC\":\n judge[p-1] = True\n ACcnt += 1\n else:\n WAcnt[p-1] += 1\n\nWAans = 0\nfor i in range(N):\n if judge[i] == True:\n WAans += WAcnt[i]\n\nprint(ACcnt,WAans)","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02802/s603201275.py","file_name":"s603201275.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"27694251763","text":"# You are given an m x n 2D image matrix (List of Lists) where each integer represents a pixel. 
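For the matrix-flip exercise introduced just below: the walkthrough solution that follows uses explicit temporaries, but Python's tuple assignment makes the same in-place two-pointer swap more compact. A sketch of that variant:

```python
def flip_vertical_axis(matrix):
    # Reverse each row in place by swapping ends and moving inwards.
    for row in matrix:
        lo, hi = 0, len(row) - 1
        while lo < hi:
            row[lo], row[hi] = row[hi], row[lo]
            lo += 1
            hi -= 1
```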
Flip it in-place along its vertical axis.\n#\n# Example:\n# Input image :\n# 1 0\n# 1 0\n#\n# Modified to :\n# 0 1\n# 0 1\n# My Notes:\n# - Needs to occur in-place, so no other data structures\n# - Only flipped along vertical axis, so just need to swap along each row\n# - I could use something like .reverse() or [::-1], but it's probably better practice to do more manually\n\ntest_inputs = [                         # Expected Results:\n    [[1]],                              # [[1]]\n    [[1,0,1],[1,0,1]],                  # [[1, 0, 1], [1, 0, 1]]\n    [[1,0]],                            # [[0, 1]]\n    [[1,2,3],[4,5,6],[7,8,9]],          # [[3, 2, 1], [6, 5, 4], [9, 8, 7]]\n    [[1,0,0],[0,0,1]]                   # [[0, 0, 1], [1, 0, 0]]\n]\n\n\ndef flip_vertical_axis(matrix):\n    # Base case: If length of row is <= 1, we don't have to do anything\n    if len(matrix[0]) <= 1:\n        return matrix\n    # Loop over each row\n    for row in matrix:\n        # We're going to look at first and last index, then move inwards while start < end\n        start = 0\n        end = len(row) - 1\n        while start < end:\n            # Create temp variables for each swap\n            start_value = row[start]\n            end_value = row[end]\n            # Replace each index with the temp variable\n            row[start] = end_value\n            row[end] = start_value\n            # Increase start, decrease end\n            start += 1\n            end -= 1\n\n\nfor matrix in test_inputs:\n    flip_vertical_axis(matrix)","repo_name":"mdrichardson/coding-challenges","sub_path":"Firecode.io/Level 1/Flip-It.py","file_name":"Flip-It.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"73691208936","text":"# JSON is commonly used with data APIs. Here's how we can parse JSON into a Python dictionary\n\nimport json\n\n# sample JSON\nuserJSON = '{\"first_name\": \"John\", \"last_name\": \"Doe\", \"Age\": 30}'\n\n# parse to dict\n\nuser = json.loads(userJSON)\n\n# Prints dictionary\nprint(user)\n\nprint(user['first_name'])\n\n# Turn a dict into JSON format\n\ncar = {'make': 'Ford', 'model': 'Mustang', 'year': 1970}\n\ncarJSON = json.dumps(car)\n\nprint(f'The json for car is {carJSON}')","repo_name":"thejamesgore/python-general","sub_path":"01_Basics/py_json.py","file_name":"py_json.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"23045469161","text":"'''\n844. Backspace String Compare\nEasy\n\nGiven two strings S and T, return if they are equal when both are typed into empty text editors. 
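For the backspace-compare problem stated just above: the solution that follows scans from the right with backspace counters (O(1) extra space). An alternative sketch builds each final string with a stack, trading O(n) space for simpler logic:

```python
def build(s: str) -> str:
    # Push characters, pop on '#', then join whatever survives.
    stack = []
    for ch in s:
        if ch != '#':
            stack.append(ch)
        elif stack:
            stack.pop()
    return ''.join(stack)

assert build("ab#c") == build("ad#c") == "ac"
```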
# means a backspace character.\n\nExample 1:\n\nInput: S = \"ab#c\", T = \"ad#c\"\nOutput: true\nExplanation: Both S and T become \"ac\".\n\nhttps://leetcode.com/problems/backspace-string-compare/\n'''\nclass Solution:\n def backspaceCompare(self, S: str, T: str) -> bool:\n finals, finalt = '', ''\n bk = 0\n i = len(S) - 1\n while i >= 0:\n if S[i] == '#':\n bk += 1\n elif bk >0:\n bk -= 1\n else:\n finals = S[i] + finals\n i -= 1\n i = len(T) - 1\n bk = 0\n while i >= 0:\n if T[i] == '#':\n bk += 1\n elif bk >0:\n bk -= 1\n else:\n finalt = T[i] + finalt\n i -= 1\n return finals == finalt\n","repo_name":"aditya-doshatti/Leetcode","sub_path":"backspace_string_compare_844.py","file_name":"backspace_string_compare_844.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"4735526936","text":"#import numpy as np\nimport random\nimport discord\nimport time\nimport rpgReference as rpg\n\nclass generator():\n \n def __init__(self,tDice=4,kDice=3,nStat=6,dice=6,stats=rpg.DNDStats,name=\"NPC\"):\n # TODO: Randomized names\n self.name=name\n self.totalDice=tDice\n self.keepDice=kDice\n self.nStats=nStat\n self.dice=dice\n self.usedRolls=[]\n self.stats=stats\n self.money=0\n \n \n def generate(self,npcTypeName=\"none\"):\n #TODO Separate Classes and Races\n #print(\"Generating Stats\")\n npcType,money=rpg.npcType[npcTypeName]\n stats,self.usedRolls=self.genStats(self.nStats,self.dice)\n #----------------------------------\n #to be removed\n #----------------------------------\n #i=0\n #print(\"Your rolls were\")\n #for used in self.usedRolls:\n # i+=1\n # print(\"Dice %d: \" % i+str(used))\n #----------------------------------\n if npcTypeName!=\"none\":\n stats=self.sortStats(stats,npcType)\n i=0\n for s in self.stats:\n self.stats[s]=stats[i]\n #print(s+\" : \"+str(self.stats[s]))\n i+=1\n self.money=self.genMoney(money)\n return\n\n def sortStats(self,oStats,npcConf):\n newStats=[]\n for i in npcConf:\n newStats.append(0)\n tmpStats=oStats.copy()\n tmpStats.sort(reverse=1)\n priority=1\n while priority<=len(npcConf):\n #print(priority)\n for current in range(len(npcConf)):\n if npcConf[current][0]==priority:\n if tmpStats[priority-1]!=0:\n newStats[current]=tmpStats[priority-1]+npcConf[current][1]\n tmpStats[priority-1]=0\n priority+=1\n for s in range(len(newStats)):\n while newStats[s]==0:\n r = random.randint(0,len(npcConf)-1)\n #print(\"-------\")\n #print(\"status %d\" % s)\n #print(\"random=%d\"% r)\n #print(\"Nuevo:%d\"%newStats[s])\n #print(\"Viejo:%d\"%tmpStats[r])\n #time.sleep(2)\n newStats[s]=tmpStats[r]\n tmpStats[r]=0\n return newStats\n\n def genMoney(self,moneyLevel):\n #TODO dice roller\n #multiplier=1\n #addition=0\n moneyLevel=rpg.NORTEMoney[moneyLevel]\n number,faces=moneyLevel.split(\"d\")\n faces,multiplier=faces.split(\"*\")\n stat=0\n for i in range(int(number)):\n stat+=random.randint(1,int(faces))\n stat*=int(multiplier)\n return stat\n\n\n \n def genStats(self,n,d):\n stats=[]\n rolls=[]\n\n for i in range(n):\n stat,roll=self.rollStat(d)\n stats.append(stat)\n rolls.append(roll)\n return stats,rolls\n \n def rollStat(self,d):\n roll=[]\n stat=0\n for i in range(self.totalDice):\n roll.append(random.randint(1,d))\n #print(\"Die %d: \" % i + str(roll[i]))\n roll.sort(reverse=1)\n #print(\"Keeping:\")\n for i in range(self.keepDice):\n stat+=roll[i]\n #print(roll[i])\n #print(\"Status: %d\" % stat)\n return stat,roll\n\n def 
randomTable(self,table=\"races\",minDice=1,maxDice=100):\n t = rpg.randomTables[table]\n res = \"none\"\n v = random.randint(minDice,maxDice)\n res=\"Dice: %d\\n\" % v\n for case in t:\n mi,ma=case.split(\"-\")\n if v>=int(mi) and v<=int(ma):\n res+=t[case]\n break\n return res\n\nclass npc():\n\n def __init__(self,name,hp):\n self.name=name\n self.hp=hp\n\n def damage(self,hit):\n self.hp-=hit\n if self.hp<=0:\n self.die()\n return self.hp\n\n def heal(self,energy):\n self.hp+=energy\n return self.hp\n\n def revive(self):\n print(self.name+\" It's alive!\")\n self.hp=1\n return\n\n def die(self):\n print(self.name+\" it's dead\")\n return\n","repo_name":"bribot/FriendComputer","sub_path":"npc.py","file_name":"npc.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"35655099632","text":"import bs4\nimport urllib.request\n\nurl = \"https://finance.naver.com/sise/\"\nreq = urllib.request.urlopen(url).read().decode('euc-kr')\nsoup = bs4.BeautifulSoup(req, 'html.parser')\n#print(soup.prettify())\n\ntop10 = soup.select('#siselist_tab_0 > tr')\n\na = 1\nfor i in top10 :\n if(i.select('a') != []) :\n print(\"{}. {} : {}\".format(a, i.select_one('a').string, i.select_one('td:nth-of-type(5)').string))\n a+=1\n","repo_name":"cuzai/pythonStudy","sub_path":"section2/finance_test2.py","file_name":"finance_test2.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18020559999","text":"def dfs(v):\n if sum(visited) == N:\n return 1\n res = 0\n for nv in g[v]:\n if visited[nv]:\n continue\n visited[nv] = 1\n res += dfs(nv)\n visited[nv] = 0\n return res\n\n\nN, M, *ab = map(int, open(0).read().split())\ng = [[] for _ in range(N)]\nfor a, b in zip(*[iter(ab)] * 2):\n a -= 1\n b -= 1\n g[a].append(b)\n g[b].append(a)\n\nvisited = [0] * N\nvisited[0] = 1\nprint(dfs(0))\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p03805/s011780493.py","file_name":"s011780493.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"11028636523","text":"\"\"\"\nadd_notification_type.\n\nRevision ID: 45fd8b9869d4\nRevises: 94836b099894\nCreate Date: 2016-12-01 12:02:19.724528\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = \"45fd8b9869d4\"\ndown_revision = \"94836b099894\"\n\n\ndef upgrade(op, tables, tester):\n op.bulk_insert(\n tables.notificationkind,\n [\n {\"name\": \"build_cancelled\"},\n ],\n )\n\n\ndef downgrade(op, tables, tester):\n op.execute(\n tables.notificationkind.delete().where(\n tables.notificationkind.c.name == op.inline_literal(\"build_cancelled\")\n )\n )\n","repo_name":"quay/quay","sub_path":"data/migrations/versions/45fd8b9869d4_add_notification_type.py","file_name":"45fd8b9869d4_add_notification_type.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":2281,"dataset":"github-code","pt":"90"} +{"seq_id":"12380598399","text":"# image_store_protocol.py\n# Script for storing DICOM images in the database encrypted under specific attributes.\n#\n# Author: Sydney Pugh\n# Date: 26 April 2020\n\nimport mysql.connector\nimport subprocess\nimport pydicom\nimport sys\n\nPOLICY_FMT = (\"{hosp} and \"\n \"(sysadmin or \"\n \"( (imaging_technician and {im_spec}) or \"\n \"( {dept} and \"\n \"( attending_physician or \"\n \"( nurse and 
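Side sketch of the CSS-selector pattern the finance_test2.py scraper above relies on: BeautifulSoup's `select()`/`select_one()` accept CSS selectors, so table rows and nth-column cells can be addressed directly. The HTML below is invented; `html.parser` is used deliberately, since unlike lxml it does not insert a `tbody` that would break the `#t > tr` child selector:

```python
import bs4

html = "<table id='t'><tr><td><a>ACME</a></td><td>10</td></tr></table>"
soup = bs4.BeautifulSoup(html, 'html.parser')
for row in soup.select('#t > tr'):
    link = row.select_one('a')
    if link is not None:  # skip header/spacer rows with no anchor
        print(link.string, row.select_one('td:nth-of-type(2)').string)
```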
{n_lvl} ) ) ) ) )\"\n )\n\n\n## Enforce modality to provide DICOM file\nif len(sys.argv) != 2:\n print('USAGE: python image_store_protocol.py ')\n exit(-1)\n\n\n## Request policy parameters from modality\nhospital = raw_input('Enter hospital: ')\nhospital = hospital.strip('\\n')\n\nimage_spec = raw_input('Enter imaging specialty: ')\nimage_spec = image_spec.strip('\\n')\n\ndepartment = raw_input('Enter department: ')\ndepartment = department.strip('\\n')\n\nnurse_lvl = raw_input('Enter nurse level: ')\nnurse_lvl = nurse_lvl.strip('\\n')\n\n\n## Encrypt DICOM file under ABE\n# encryption cmmd format: cpabe-enc \npolicy = POLICY_FMT.format(hosp=hospital, im_spec=image_spec, dept=department, n_lvl=nurse_lvl)\ncmmd = ['cpabe-enc', '-k', '-o', '.temp-enc.cpabe', './cpabe-keys/pub_key', sys.argv[1], policy]\n\nret = subprocess.call(cmmd)\nif ret != 0:\n print('cpabe-enc returned ' + str(ret) + ' ...')\n exit(-1)\n\nfp = open('.temp-enc.cpabe', 'r')\nencrypted_data = fp.read()\nfp.close()\n\n\n## Extract parameters from DICOM file\ndr = pydicom.dcmread(sys.argv[1])\n\n\n## Store encrypted DICOM file in database\ncnx = mysql.connector.connect(\n host='localhost',\n user='',\n passwd='',\n database='mydb',\n auth_plugin='mysql_native_password'\n)\ncursor = cnx.cursor()\n\nquery = (\"INSERT INTO imaging_db \"\n \"(PATIENT_ID,STUDY_ID,STUDY_DATE,STUDY_TIME,IMAGE_TYPE,DICOM_FILE) \"\n \"VALUES (%s,%s,%s,%s,%s,%s)\")\n\ndata = (dr.PatientID, dr.StudyID, dr.StudyDate, dr.StudyTime, dr.Modality, encrypted_data)\n\ncursor.execute(query, data)\ncnx.commit()\n\ncursor.close()\ncnx.close()\n\n\n## Remove encryption file\nsubprocess.call(['rm','.temp-enc.cpabe'])\n","repo_name":"sfpugh/cis-700-proj","sub_path":"image_store_protocol.py","file_name":"image_store_protocol.py","file_ext":"py","file_size_in_byte":2165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"35748469531","text":"#!/usr/bin/env python3\n\"\"\" This module converts a string to a number\n\"\"\"\nimport sys\ndef convert(input_s):\n \"\"\" This is the convert function\n \"\"\"\n try:\n x_return = int(input_s)\n return x_return\n except (ValueError, TypeError) as e_out:\n print(\"Conversion Error: {}\".format(str(e_out)), file=sys.stderr)\n raise\n","repo_name":"hansknecht/PythonSandbox","sub_path":"PythonSandbox/exceptional.py","file_name":"exceptional.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"38735773710","text":"# Import all necessary libraries\r\nimport pandas as pd\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom pprint import pprint\r\nimport re\r\nfrom bs4 import BeautifulSoup\r\nfrom nltk.tokenize import WordPunctTokenizer\r\n\r\n# Read the training data for sentiment analysis\r\n# Stanford's Sentiment140 dataset has been used as training data\r\ncols = ['sentiment','num','date','query','id','tweet']\r\ndata = pd.read_csv('train.csv',names=cols, encoding='cp1252')\r\n# Drop any duplicates and unnecessary columns\r\ndata.dropna(inplace=True)\r\ndata.drop(['num','date','query','id'],axis=1,inplace=True)\r\n# Reassign the positive sentiment to be equal to '1'\r\ndata.loc[data.sentiment == 4, 'sentiment'] = 1\r\n\r\n# Preparing data for cleaning\r\ntok = WordPunctTokenizer()\r\npat1 = r'@[A-Za-z0-9_]+' # Remove '@' from tweets\r\npat2 = r'https?://[^ ]+' # Remove http links from 
tweets\r\ncombined_pat = r'|'.join((pat1, pat2))\r\nwww_pat = r'www.[^ ]+' # Remove websites from the tweets\r\n# convert negative apostrophe words into two separate words\r\nnegations_dic = {\"isn't\":\"is not\", \"aren't\":\"are not\", \"wasn't\":\"was not\", \"weren't\":\"were not\",\r\n                \"haven't\":\"have not\",\"hasn't\":\"has not\",\"hadn't\":\"had not\",\"won't\":\"will not\",\r\n                \"wouldn't\":\"would not\", \"don't\":\"do not\", \"doesn't\":\"does not\",\"didn't\":\"did not\",\r\n                \"can't\":\"can not\",\"couldn't\":\"could not\",\"shouldn't\":\"should not\",\"mightn't\":\"might not\",\r\n                \"mustn't\":\"must not\"}\r\nneg_pattern = re.compile(r'\\b(' + '|'.join(negations_dic.keys()) + r')\\b')\r\nrt = re.compile('(\\s*)rt(\\s*)') # Remove 'RT' (retweet) words from tweets\r\n\r\n# Function to clean the tweets with respect to the above-mentioned cleaning aspects\r\ndef tweet_cleaner(text):\r\n    soup = BeautifulSoup(text, 'lxml') # Parse the HTML and keep only the text\r\n    souped = soup.get_text()\r\n    try:\r\n        bom_removed = souped.decode(\"utf-8-sig\").replace(u\"\\ufffd\", \"?\")\r\n    except:\r\n        bom_removed = souped\r\n    stripped = re.sub(combined_pat, '', bom_removed)\r\n    stripped = re.sub(www_pat, '', stripped)\r\n    lower_case = stripped.lower() # convert everything to lower case\r\n    lower_case = rt.sub('', lower_case)\r\n    neg_handled = neg_pattern.sub(lambda x: negations_dic[x.group()], lower_case)\r\n    letters_only = re.sub(\"[^a-zA-Z]\", \" \", neg_handled)\r\n    # During the letters_only process two lines above, it has created unnecessary white spaces\r\n    # Tokenize and join together to remove unnecessary white spaces\r\n    words = [x for x in tok.tokenize(letters_only) if len(x) > 1]\r\n    return (\" \".join(words)).strip()\r\n\r\n# Clean the training set data with the above cleaning function\r\nprint(\"Cleaning and parsing the training tweets...\")\r\nclean_tweets = []\r\nfor i in data.tweet:\r\n    clean_tweets.append(tweet_cleaner(i))\r\nprint(\"Done!\\n\")\r\n# Get the final cleaned training dataset\r\nclean_df = pd.DataFrame(clean_tweets, columns=['tweet'])\r\nclean_df['sentiment'] = data.sentiment\r\n\r\n# Read the test data\r\ntest = pd.read_csv('test.csv', encoding='cp1252')\r\n# Clean the test data with the above cleaning function as well\r\nprint(\"Cleaning and parsing the testing tweets...\")\r\nclean_test = []\r\nfor i in test.Tweet:\r\n    clean_test.append(tweet_cleaner(i))\r\nprint(\"Done!\\n\")\r\n# Get the final dataframe containing the cleaned test data\r\ntest1 = pd.DataFrame(clean_test, columns=['tweet'])\r\n\r\n# Vectorize the tweets for both training and testing datasets\r\nvect1 = CountVectorizer(min_df=5, ngram_range=(1,2)).fit(clean_df['tweet'].values)\r\nX_vect = vect1.transform(clean_df['tweet'].values)\r\ntest_vect = vect1.transform(test1['tweet'].values)\r\n# Train the dataset with Logistic Regression\r\nmodel = LogisticRegression()\r\nmodel.fit(X_vect, clean_df['sentiment'].values)\r\npredictions = model.predict(test_vect) # Predict the results\r\nprint(predictions.mean(),\"of the total test data contains tweets of positive sentiments!\")\r\n\r\n# Obtain the CSV file in the required format\r\ntest.drop(['Date','User','v','a'],axis=1, inplace=True)\r\ntest.reset_index(inplace=True)\r\ntest.columns = ['ID', 'Contents'] # Change column names to match the format specified\r\npred_df = pd.DataFrame(predictions, columns =['Result']) # Convert predictions into a dataframe\r\nresult = pd.concat([test, pred_df], axis=1, sort=False) # Merge the two dataframes\r\nresult.to_csv('output.csv', 
sep=',', header=True, index=False, encoding='cp1252')\r\n","repo_name":"RohiBaner/Sentiment-Analysis-and-Opinion-Mining","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"17084921510","text":"import sys\nimport itertools\nimport time\nimport numpy as np\n\nsys.path.append('../')\n\nimport qsplit.qsplit_mlrecon_methods as qmm\n\n\n# (1) identify cut_nodes and uncut_nodes (nodes incident to a cut and their complement)\n# (2) choose \"hot nodes\": nodes incident to a graph partition,\n#     to which we will nonetheless apply a partial mixer in the first mixing layer\n# WARNING: this algorithm has combinatorial complexity:\n#          O({ #cut_nodes_in_subgraph \\\\choose #max_cuts })\n# I am simply assuming that this complexity won't be a problem for now\n# if it becomes a problem when we scale up, we should rethink this algorithm\ndef choose_nodes(graph, subgraphs, cut_edges, max_cuts):\n    cut_nodes = []\n    for edge in cut_edges:\n        cut_nodes.extend(edge)\n\n    # collect subgraph data\n    subgraph_A, subgraph_B = subgraphs\n    cut_nodes_A = [node for node in subgraph_A.nodes if node in cut_nodes]\n    cut_nodes_B = [node for node in subgraph_B.nodes if node in cut_nodes]\n    subgraph_cut_nodes = [ (subgraph_A, cut_nodes_A), (subgraph_B, cut_nodes_B) ]\n\n    # compute the cost of each choice of hot nodes\n    # hot nodes should all be chosen from one subgraph, so loop over subgraph indices\n    choice_cost = {}\n    for ext_idx in [ 0, 1 ]:\n        # ext_graph: subgraph we're \"extending\" with nodes from the complement graph\n        # ext_cut_nodes: cut_nodes in ext_graph\n        ext_graph, ext_cut_nodes = subgraph_cut_nodes[ext_idx]\n\n        # adjacent (complement) graph and cut nodes\n        adj_graph, adj_cut_nodes = subgraph_cut_nodes[1-ext_idx]\n\n        # determine the number of nodes in adj_cut_nodes that we need to \"throw out\".\n        # nodes that are *not* thrown out are attached to ext_graph in the first mixing layer\n        num_to_toss = len(adj_cut_nodes) - max_cuts\n        num_to_toss = max(num_to_toss,0)\n\n        # determine size of fragments after circuit cutting.\n        # if there are several options (of nodes to toss) with the same \"cut cost\",\n        # these fragment sizes are used to choose between those options\n        ext_size = ext_graph.number_of_nodes() + len(adj_cut_nodes) - num_to_toss\n        complement_size = subgraphs[1-ext_idx].number_of_nodes()\n        frag_sizes = tuple(sorted([ ext_size, complement_size ], reverse = True))\n\n        # if we don't need to throw out any nodes,\n        # log a choice_cost of 0 and skip the calculation below\n        if num_to_toss == 0:\n            choice_cost[ext_idx,()] = (0,) + frag_sizes\n            continue\n\n        # for some node (in adj_cut_nodes) that we might throw out\n        # (i) determine its neighbors in ext_graph\n        # (ii) determine the degrees of those neighbors\n        # (iii) add up those degrees\n        def single_choice_cost(adj_node):\n            return sum([ graph.degree[ext_node]\n                         for ext_node in graph.neighbors(adj_node)\n                         if ext_node in ext_graph ])\n\n        # loop over all combinations of adjacent nodes that we could throw out\n        for toss_nodes in itertools.combinations(adj_cut_nodes, num_to_toss):\n            _choice_cost = sum([ single_choice_cost(node) for node in toss_nodes ])\n            choice_cost[ext_idx,toss_nodes] = (_choice_cost,) + frag_sizes\n\n    # get the index of the subgraph we're \"extending\" and the adjacent nodes we're tossing out\n    ext_idx, toss_nodes = min(choice_cost, key = choice_cost.get)\n    ext_graph, ext_cut_nodes = subgraph_cut_nodes[ext_idx]\n\n    # 
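The `choose_nodes` routine above scores every candidate set of tossed nodes and keeps the cheapest via `min(choice_cost, key=choice_cost.get)`. A generic sketch of that enumerate-and-argmin pattern with a toy cost function:

```python
import itertools

candidates = ['a', 'b', 'c', 'd']
# Map each possible choice to a comparable cost; min() over the dict keys
# with key=cost.get then returns the cheapest choice.
cost = {combo: sum(ord(ch) for ch in combo)
        for combo in itertools.combinations(candidates, 2)}
best = min(cost, key=cost.get)
print(best)  # ('a', 'b') -- the lowest-cost pair
```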
determine whether a node in ext_graph has any neighbors in toss_nodes\n def _no_tossed_neighbors(ext_node):\n return not any( neighbor in toss_nodes for neighbor in graph.neighbors(ext_node) )\n\n # hot nodes = those without neighbors that we are tossing out\n hot_nodes = list(filter(_no_tossed_neighbors, ext_cut_nodes))\n return cut_nodes, hot_nodes\n\n\ndef simple_choose_nodes(graph, subgraphs, cut_edges, max_cuts, init_state):\n cut_nodes = []\n for edge in cut_edges:\n cut_nodes.extend(edge)\n\n # collect subgraph data\n subgraph_A, subgraph_B = subgraphs\n cut_nodes_A = [node for node in subgraph_A.nodes if node in cut_nodes]\n cut_nodes_B = [node for node in subgraph_B.nodes if node in cut_nodes]\n subgraph_cut_nodes = [ (subgraph_A, cut_nodes_A), (subgraph_B, cut_nodes_B) ]\n\n # Randomly select the subgraph to draw hot nodes from\n rand_index = np.random.choice([0,1])\n cur_subgraph, cur_cut_nodes = subgraph_cut_nodes[rand_index]\n other_subgraph, other_cut_nodes = subgraph_cut_nodes[(rand_index + 1) % 2]\n\n # Collect all potential hot nodes (i.e. where cost < max_cuts and the node is not already in the MIS)\n valid_hot_nodes = []\n for node in cur_cut_nodes:\n neighbors = list(graph.neighbors(node))\n cost = len([n for n in neighbors if n in other_cut_nodes])\n if cost <= max_cuts and list(reversed(init_state))[node] == '0':\n valid_hot_nodes.append(node)\n\n np.random.shuffle(valid_hot_nodes)\n hot_nodes = []\n cur_cost = 0\n for node in valid_hot_nodes:\n neighbors = list(graph.neighbors(node))\n temp_cost = len([n for n in neighbors if n in other_cut_nodes])\n if cur_cost + temp_cost <= max_cuts:\n hot_nodes.append(node)\n cur_cost += temp_cost\n if cur_cost == max_cuts:\n break\n\n return cut_nodes, hot_nodes\n\n\ndef sim_with_cutting(fragments, wire_path_map, frag_shots, backend, mode=\"likely\",\n verbose=0):\n \"\"\"\n A helper function to simulate a fragmented circuit.\n\n Output:\n probs: dict{bitstring : float}\n Outputs a dictionary containing the simulation results. 
Keys are the\n bitstrings which were observed and their values are the probability that\n they occurred with.\n \"\"\"\n\n # build fragment models\n model_time_start = time.time()\n\n frag_data = qmm.collect_fragment_data(fragments, wire_path_map,\n shots = frag_shots,\n tomography_backend = backend)\n direct_models = qmm.direct_fragment_model(frag_data)\n if mode == \"direct\":\n models = direct_models\n elif mode == \"likely\":\n likely_models = qmm.maximum_likelihood_model(direct_models)\n models = likely_models\n else:\n raise Exception('Unknown recombination mode:', mode)\n\n model_time = time.time() - model_time_start\n\n # recombine models to recover full circuit output\n recombine_time_start = time.time()\n recombined_dist = qmm.recombine_fragment_models(models, wire_path_map)\n recombine_time = time.time() - recombine_time_start\n\n # print timing info\n if verbose:\n print(f\"\\tModel time: {model_time:.3f}, Recombine time: {recombine_time:.3f}\")\n\n return recombined_dist\n","repo_name":"Quantum-Software-Tools/dqva-and-circuit-cutting","sub_path":"utils/cutting_funcs.py","file_name":"cutting_funcs.py","file_ext":"py","file_size_in_byte":6774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"90"} +{"seq_id":"73214221418","text":"from gen_data import gen_data\nfrom cal_shocks import var_reg, cal_shocks, cal_shocks_groups, gen_sep_shocks\nfrom portfolio_construction import port_construct\nfrom factor_tests import factor_tests\nfrom var_with_agg import var_with_agg\nfrom statsmodels.tsa.api import VAR\nfrom statsmodels.tsa.stattools import adfuller\nimport pandas as pd\nimport numpy as np\nfrom statsmodels.tsa.api import VAR\n\n# Settings\nstart_year = 1975 # 数据起点\nend_year = 2020 # 数据终点\n\nvar_list = ['ret_1y', 'bm', 'prof', 'inv', 'd_me', 'mom', 'iv']\n# 'ret_1y', 'bm', 'prof', 'inv', 'd_me', 'mom', 'iv'\n# 'ret_1y', 'bm', 'prof', 'roe', 'inv', 'lev', 'd_me', 'mom', 'iv' # VAR回归使用的变量\nanom_list = ['bm', 'prof', 'inv', 'd_me', 'mom', 'iv']\n# 'bm', 'prof', 'inv', 'd_me', 'mom', 'iv'\n# var_list的一部分,用于构造anomaly portfolio然后做CF和DR shocks分解\nfactor_list = ['bm', 'me', 'prof', 'inv', 'mom', 'iv']\n# 'bm', 'me', 'prof', 'inv', 'mom', 'iv'\n# 'bm', 'mom', 'me', 'iv' # fama-macbeth回归中使用的因子,并且构造用于回归的portfolio\nfactor_port_list = ['bm', 'mom', 'me', 'iv'] # 构造用于回归的portfolio所使用的因子\n\nuse_inv = 'inv5' # investment代理:'inv5', 'inv4', 'inv3', 'inv2', 'inv_adj2', 'inv_adj3', 'inv_adj4', 'inv_adj5'\nuse_dme = 'd5_me' # Size代理:'d5_me', 'd4_me', 'd3_me', 'd2_me'\nuse_mom = 6 # momentum:3, 6, 9, 18\n\n# num_of_lags = {'ret_1y': 4, 'prof': 0, 'roe': 2, 'inv': 0,\n# 'lev': 2, 'bm': 0, 'd_me': 0, 'mom': 0, 'iv': 0} # long VAR: 回归中包含的lag数\n\npseudo_weight = 0.9 # 如果不进行这步定为np.nan\nuse_dlret = 'V02' # delisting returns处理:'V02',\nde_inflation = False\nde_microcap = True\nde_outliers = True\nuse_me = 'V02' # market equity计算方法:'V02',\nuse_be = 'original' # book equity计算方法:'original',\ndata_filter = 'V02' # 根据book equity筛选:'V02',\nroe_lag = 0 # roe = ni/me或l.me:0, 1\nweighting = 'value' # 构造agg变量时加权方法:'equal' or 'value'\ntruncated = True # roe、prof小于-1的是否化为-1:True, False\nsimple_or_ln_return = 'ln' # returns是否视为对数return:'ln', 'simple'\nif_standardize = False # 除returns外的变量是否进行标准化\n\nnum_of_cuts_anom = 5\nnum_of_cuts_factor = 2\nnum_of_iv_groups = 5\n\n# data cleaning, generating aggregate and market-adjusted data\nvar_data, ma_df, agg_df, agg_df_summary, ma_df_summary, var_data_summary = gen_data(start_year=start_year,\n end_year=end_year,\n var_list=var_list,\n 
use_inv=use_inv, use_dme=use_dme,\n use_mom=use_mom,\n pseudo_weight=pseudo_weight,\n use_dlret=use_dlret,\n de_inflation=de_inflation,\n de_microcap=de_microcap,\n use_me=use_me,\n use_be=use_be,\n data_filter=data_filter,\n roe_lag=roe_lag,\n weighting=weighting,\n truncated=truncated,\n simple_or_ln_return=simple_or_ln_return,\n if_standardize=if_standardize)\n\n# VAR\nagg_model, agg_params, agg_resid, ma_model, ma_params, ma_resid, ma_params_p, ma_resid_p, resid_list = var_reg(\n agg_df=agg_df,\n ma_df=ma_df,\n var_list=var_list)\n\n# calculate DR and CF shocks\nport_df, agg_shocks, ma_params, ma_resid, all_output_df = cal_shocks(var_data=var_data,\n agg_params=agg_params,\n agg_resid=agg_resid,\n ma_params=ma_params,\n ma_resid=ma_resid,\n ma_params_p=ma_params_p,\n ma_resid_p=ma_resid_p,\n resid_list=resid_list)\n\nprint('【all firms, market-adjusted】decomposition results')\nprint(all_output_df)\n\n# calculate the variance, covariance and correlation between CF and DR shocks in different iv groups\n# VAR parameters are the same across groups, while the covariance matrix is different.\ngroup_shocks_df = cal_shocks_groups(ma_df=ma_df,\n var_data=var_data,\n ma_params_p=ma_params_p,\n ma_resid_p=ma_resid_p,\n resid_list=resid_list,\n num_of_iv_groups=num_of_iv_groups)\n\nprint('【in groups, market-adjusted】decomposition results')\nprint(group_shocks_df)\n\n# mixed VAR (using both market-adjusted variables and aggregate variables)\n# consider the influence of aggregate variables on market-adjusted variables but not inversely\n# using the method in V02\nmix_shocks, shocks_df, ivgroup_ret_shocks_mix, var_params, var_Hc0_se, var_bse = var_with_agg(ma_df=ma_df,\n agg_df=agg_df,\n var_data=var_data,\n var_list=var_list,\n num_of_iv_groups=num_of_iv_groups)\n\nprint('【all firms & in groups, market-adjusted + aggregate】decomposition results')\nprint(shocks_df)\n\n# constructing anomaly portfolios\nanom_ret, anom_shocks, fac_port, anom_ret_summary, anomaly_output_df, iv_anom_ret_original, iv_anom_shocks_original = \\\n port_construct(port_df=port_df,\n anom_list=anom_list,\n factor_list=factor_list,\n factor_port_list=factor_port_list)\n\n# do 2-stage fama-macbeth regressions\nret_result, ret_tvalue, ret_result2, ret_tvalue2, ret_result3, ret_tvalue3 = factor_tests(fac_port=fac_port,\n anom_ret=anom_ret,\n anom_shocks=anom_shocks,\n factor_port_list=factor_port_list)\n\nprint('decomposition results of anomaly portfolios(factor-mimicking portfolios)')\nprint(anomaly_output_df.iloc[-2:].reset_index(drop=True))\n\n# Display Results\n# 【market-adjusted only】vw-portfolio & market DR shocks correlation\niv_anom_ret_original['year'] = iv_anom_ret_original.index\niv_anom_ret_original['year'] = iv_anom_ret_original['year'].apply(lambda x: x.year)\niv_anom_ret_original = iv_anom_ret_original.set_index('year')\niv_anom_shocks_original['year'] = iv_anom_shocks_original.index\niv_anom_shocks_original['year'] = iv_anom_shocks_original['year'].apply(lambda x: x.year)\niv_anom_shocks_original = iv_anom_shocks_original.set_index('year')\n\nwithagg_correlation_original = pd.DataFrame(columns=['covariance', 'correlation', 'tvalue'])\nfor col in iv_anom_ret_original.columns:\n df_cov = pd.merge(iv_anom_ret_original[col], agg_shocks['DR_agg'], left_index=True, right_index=True, how='inner')\n df_cov.dropna(axis=0, inplace=True)\n cov = np.cov(df_cov[col], df_cov['DR_agg'])[0, 1]\n var_ret = np.cov(df_cov[col], df_cov['DR_agg'])[0, 0]\n var_agg = np.cov(df_cov[col], df_cov['DR_agg'])[1, 1]\n 
withagg_correlation_original.loc[col, 'covariance'] = cov\n    withagg_correlation_original.loc[col, 'correlation'] = cov / np.sqrt(var_ret) / np.sqrt(var_agg)\nfor col in iv_anom_shocks_original.columns:\n    df_cov = pd.merge(iv_anom_shocks_original[col], agg_shocks['DR_agg'], left_index=True, right_index=True,\n                      how='inner')\n    df_cov.dropna(axis=0, inplace=True)\n    cov = np.cov(df_cov[col], df_cov['DR_agg'])[0, 1]\n    var_ret = np.cov(df_cov[col], df_cov['DR_agg'])[0, 0]\n    var_agg = np.cov(df_cov[col], df_cov['DR_agg'])[1, 1]\n    withagg_correlation_original.loc[col, 'covariance'] = cov\n    withagg_correlation_original.loc[col, 'correlation'] = cov / np.sqrt(var_ret) / np.sqrt(var_agg)\n\nprint('【market-adjusted only】vw-portfolio & market DR shocks correlation')\nprint(withagg_correlation_original)\n\n# 【market-adjusted & aggregate】vw-portfolio & market DR shocks correlation\nivgroup_ret_shocks_mix['year'] = ivgroup_ret_shocks_mix.index\nivgroup_ret_shocks_mix['year'] = ivgroup_ret_shocks_mix['year'].apply(lambda x: x.year)\nivgroup_ret_shocks_mix = ivgroup_ret_shocks_mix.set_index('year')\n\nret_correlation_mix = pd.DataFrame(columns=['covariance', 'correlation', 'tvalue'])\nfor col in ivgroup_ret_shocks_mix.columns:\n    df_cov = pd.merge(ivgroup_ret_shocks_mix[col], agg_shocks['DR_agg'], left_index=True, right_index=True, how='inner')\n    df_cov.dropna(axis=0, inplace=True)\n    cov = np.cov(df_cov[col], df_cov['DR_agg'])[0, 1]\n    var_ret = np.cov(df_cov[col], df_cov['DR_agg'])[0, 0]\n    var_agg = np.cov(df_cov[col], df_cov['DR_agg'])[1, 1]\n    ret_correlation_mix.loc[col, 'covariance'] = cov\n    ret_correlation_mix.loc[col, 'correlation'] = cov / np.sqrt(var_ret) / np.sqrt(var_agg)\n\nprint('【market-adjusted & aggregate, return】vw-portfolio & market DR shocks correlation')\nprint(ret_correlation_mix)\n\nprint('FMB regression results: returns')\nprint(ret_tvalue)\nprint('FMB regression results: shocks')\nprint(ret_tvalue2)\nprint('FMB regression results: returns&shocks')\nprint(ret_tvalue3)\n\n","repo_name":"youjing18/iv-anomaly-decomposition","sub_path":"all_operations.py","file_name":"all_operations.py","file_ext":"py","file_size_in_byte":10691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"37908579883","text":"import pygame\n\npygame.init() # initialize pygame\n\n# set the screen size\nscreen_width = 480\nscreen_height = 640\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n# set the window title\npygame.display.set_caption(\"Game Practice\")\n\n# event loop\nrunning = True # is the game still running?\nwhile running:\n    for event in pygame.event.get(): # which event occurred?\n        if event.type == pygame.QUIT: # was a window-close event raised?\n            running = False # the game is no longer running\n\n# quit the game\npygame.quit()","repo_name":"jiheon788/python-game","sub_path":"base/1_create_frame.py","file_name":"1_create_frame.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"33646920554","text":"from turtle import *\nfrom pyfiglet import Figlet\nfrom colorama import Fore, Style\nimport time\n\nfig = Figlet(font='standard')\n\nwhile True:\n\tprint(Fore.MAGENTA + fig.renderText('SHAPER') + Style.RESET_ALL)\n\tprint(\"Shaper V1.1\")\n\tprint(\" \")\n\ts=input(\"Enter the line color in English: \")\n\tx=input(\"Enter the fill color in English: \")\n\tprint(\" \")\n\ty=int(input(\"Enter the line width in pixels: \"))\n\tn=int(input(\"Enter the number of sides: \"))\n\tf=int(input(\"Enter the side length: \"))\n\tprint(\" 
\")\n\ti=0\n\twidth(y)\n\tcolor(s)\n\tbegin_fill()\n\twhile i 0:\n # for not NULL cases (explicit targets)\n true_tgt_pol.add(f\"{t_tgt[0]} - {t_tgt[-1]} ~ {t_pol}\")\n if implicit and len(t_tgt) == 0:\n # for NULL cases (implicit targets)\n true_tgt_pol.add(f\"0 - 0 ~ {t_pol}\")\n\n for p_pol, p_tgt in zip(pred_pol, pred_target_idx):\n if len(p_tgt) > 0:\n pred_tgt_pol.add(f\"{p_tgt[0]} - {p_tgt[-1]} ~ {p_pol}\")\n if implicit and len(p_tgt) == 0:\n pred_tgt_pol.add(f\"0 - 0 ~ {p_pol}\")\n\n common_tgt_pol = true_tgt_pol & pred_tgt_pol\n\n return true_tgt_pol, pred_tgt_pol, common_tgt_pol\n\n\ndef convert_pred_to_TAS_format(truth, preds, gold_xml_file=None):\n new_preds = []\n trimmed_preds = []\n num_trimmed_sentences = 0\n for pred in preds:\n new_pred = []\n trim_flag = True\n for p in pred:\n if phr_sen == '':\n if eval_task == 'TSD':\n match = re.match(r\"(The review expressed (\\[(positive|negative|neutral)\\] opinion for \\[(.+?)\\](, )*)+)\", p)\n else:\n match = re.match(r\"(The review expressed (opinion for \\[(.+?)\\](, )*)+)\", p)\n else:\n if eval_task == 'TSD':\n match = re.match(r\"((.*(\\s\\s)(positive|negative|neutral)(\\s)*)+)\", p)\n else:\n match = re.match(r\"((.*(\\s\\s)*)+)\", p)\n\n if match:\n out = match.groups()[0].strip().strip(\",\")\n new_pred.append(out)\n else:\n new_pred.append(\"\")\n\n if p != new_pred[-1]:\n if new_pred[-1] == \"\":\n trimmed_preds.append(\"--------------\")\n else:\n trimmed_preds.append(p + \"\\nchanged pred: \\n\" + new_pred[-1])\n if trim_flag:\n trim_flag = False\n num_trimmed_sentences += 1\n new_preds.append(new_pred)\n\n with open('trimmed_preds1.txt', \"w+\") as f:\n f.write(f\"Number of trimmed sentences={num_trimmed_sentences}\\n\\n\")\n f.write(\"\\n\\n\".join(trimmed_preds))\n\n preds = new_preds\n\n if not os.path.exists(\"predictions\"):\n os.mkdir(\"predictions\")\n\n # Saving the predictions if needed\n with open(f\"predictions/{eval_task}_{dir_prefix}_predictions_{datetime.now()}.txt\", \"w\") as f:\n for i, text in enumerate(df[\"input_text\"].tolist()):\n f.write(str(text) + \"\\n\\n\")\n\n f.write(\"Truth:\\n\")\n f.write(truth[i] + \"\\n\\n\")\n\n f.write(\"Prediction:\\n\")\n for pred in preds[i]:\n f.write(str(pred) + \"\\n\")\n f.write(\"________________________________________________________________________________\\n\")\n\n # exit(1)\n\n def getsubidx(x, y):\n l1, l2 = len(x), len(y)\n for i in range(l1):\n if x[i:i + l2] == y:\n return i\n return -1\n\n # get the gold annotations for the aspect-sentiment, yes_no, ner_tags from the TAS-BERT test file\n gold_df = pd.read_csv(f'data/{dataset}/test_TAS.tsv', sep=\"\\t\")\n gold_id = gold_df[\"sentence_id\"].tolist()\n\n for pred_offset in range(3):\n # get the input text ids, and input text from the text_gen test set for this task\n input_text = df[\"input_text\"].tolist()\n dup_count = 0\n longest_prefix_count = 0\n\n # clear the gold opinions and get the empty framework\n sen_tree_map = {}\n xml_tree = ET.parse(gold_xml_file)\n root = xml_tree.getroot()\n\n for node in root.iter('Review'):\n for sen in node.iter('sentence'):\n for elem in sen.iter():\n if elem.tag == 'sentence':\n sen_key = elem.attrib['id']\n sen_tree_map[sen_key] = sen\n if elem.tag == 'Opinions':\n if elem is not None:\n elem.clear()\n\n Common_Num_imp = 0\n True_Num_imp = 0\n Pred_Num_imp = 0\n Common_Num_exp = 0\n True_Num_exp = 0\n Pred_Num_exp = 0\n for idx, inp_text in enumerate(input_text):\n wrong_flag = False\n num_combinations = 36 if dataset == 'semeval-2016' else 39\n sentence_id 
= list(set(gold_id[idx * num_combinations: (idx + 1) * num_combinations]))\n\n assert len(sentence_id) == 1, \"************ 2 different sentence ids ***************\"\n sentence_id = sentence_id[0]\n\n current_sen = sen_tree_map[sentence_id]\n current_opinions = current_sen.find('Opinions')\n if current_opinions is None:\n current_opinions = ET.Element('Opinions')\n current_sen.append(current_opinions)\n\n # extract the true and predicted targets and their polarities\n if phr_sen == '':\n true_target = re.findall(r\"opinion for \\[(.+?)\\]\", truth[idx])\n pred_target = re.findall(r\"opinion for \\[(.+?)\\]\", preds[idx][pred_offset])\n else:\n true_target = [each_op.split(\" ~ \")[0] for each_op in truth[idx].split(\" ~~ \")]\n pred_target = [tgt_asp_pol for op_idx, tgt_asp_pol in enumerate(preds[idx][pred_offset].split(\" \")) if\n op_idx % 2 == 0 and preds[idx][pred_offset] != '']\n\n if eval_task == 'TSD':\n if phr_sen == '':\n true_pol = re.findall(r\" \\[([A-Za-z]+)\\] opinion for\", truth[idx])\n pred_pol = re.findall(r\" \\[([A-Za-z]+)\\] opinion for\", preds[idx][pred_offset])\n else:\n true_pol = [each_op.split(\" ~ \")[1] for each_op in truth[idx].split(\" ~~ \")]\n pred_pol = [tgt_asp_pol for op_idx, tgt_asp_pol in enumerate(preds[idx][pred_offset].split(\" \")) if\n op_idx % 2 == 1 and preds[idx][pred_offset] != '']\n\n # If any aspect polarity is dropped by any chance, then, we have to exclude that respective\n # target also\n if len(pred_pol) != len(pred_target):\n pred_target = pred_target[:len(pred_pol)]\n\n assert len(true_pol) == len(true_target)\n assert len(pred_pol) == len(pred_target)\n\n true_target_idx = []\n for each_target in true_target:\n if each_target != 'NULL':\n sub_idx = getsubidx(inp_text.split(), each_target.split())\n if inp_text.count(each_target) > 1:\n dup_count += 1\n # print(f\"{dup_count}: Target: {each_target}\\nText: {inp_text}\\n\\n\")\n if sub_idx != -1:\n true_target_idx.append(\n [it for it in range(sub_idx, (sub_idx + len(each_target.split())))])\n else:\n true_target_idx.append([])\n else:\n true_target_idx.append([])\n\n pred_target_idx = []\n for each_target in pred_target:\n if each_target != 'NULL':\n\n # clean the target word before finding its index\n # The intuition is changing the word \"Ray' s\" ----> \"Ray ' s\"\n tgt = clean_str(each_target)\n if each_target != tgt:\n # print(f\"changing '{each_target}' to '{tgt}'\\n\")\n each_target = tgt\n sub_idx = getsubidx(inp_text.split(), each_target.split())\n if sub_idx != -1:\n pred_target_idx.append(\n [it for it in range(sub_idx, (sub_idx + len(each_target.split())))])\n else:\n pred_target_idx.append([])\n else:\n pred_target_idx.append([])\n\n # verify if number of polarities == number of targets\n if eval_task == 'TSD':\n assert len(true_pol) == len(true_target)\n assert len(pred_pol) == len(pred_target)\n\n true_tgt_pol_imp, pred_tgt_pol_imp, common_tgt_pol_imp = compute_F1_for_TSD(true_target_idx, pred_target_idx, true_pol, pred_pol, implicit=True)\n true_tgt_pol_exp, pred_tgt_pol_exp, common_tgt_pol_exp = compute_F1_for_TSD(true_target_idx, pred_target_idx, true_pol, pred_pol, implicit=False)\n\n True_Num_imp += len(true_tgt_pol_imp)\n Pred_Num_imp += len(pred_tgt_pol_imp)\n Common_Num_imp += len(common_tgt_pol_imp)\n True_Num_exp += len(true_tgt_pol_exp)\n Pred_Num_exp += len(pred_tgt_pol_exp)\n Common_Num_exp += len(common_tgt_pol_exp)\n\n # to generate the XML file for TD evaluation\n gold_sentence = inp_text.split()\n xml_sentence = current_sen.find('text').text\n\n 
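# Editor's note (added comment): the block below converts each predicted
# target from token indices back to character offsets in the raw XML
# sentence. Regex metacharacters in the tokens are escaped, the tokens are
# joined with '\s*' so whitespace differences between the tokenised text and
# the XML text do not matter, and earlier matches are counted so that
# repeated surface forms resolve to the correct occurrence.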
for each_tgt_idx in pred_target_idx:\n if len(each_tgt_idx) == 0:\n op = ET.Element('Opinion')\n op.set('target', 'NULL')\n op.set('category', \"\")\n op.set('polarity', \"\")\n op.set('from', '0')\n op.set('to', '0')\n current_opinions.append(op)\n else:\n # for x in pred_target_idx:\n start = each_tgt_idx[0]\n end = len(each_tgt_idx) + start\n target_sub_seq = gold_sentence[start: end]\n while '(' in target_sub_seq:\n target_sub_seq[target_sub_seq.index('(')] = '\\('\n while ')' in target_sub_seq:\n target_sub_seq[target_sub_seq.index(')')] = '\\)'\n while '$' in target_sub_seq:\n target_sub_seq[target_sub_seq.index('$')] = '\\$'\n target_match = re.compile('\\\\s*'.join(target_sub_seq))\n # target_match = re.compile('\\\\s*'.join(sentence[start:end]))\n sentence_org = ' '.join(gold_sentence)\n target_match_list = re.finditer(target_match, sentence_org)\n true_idx = 0\n for m in target_match_list:\n if start == sentence_org[0:m.start()].count(' '):\n break\n true_idx += 1\n\n target_match_list = re.finditer(target_match, xml_sentence)\n match_list = []\n for m in target_match_list:\n match_list.append(str(m.start()) + '###' + str(len(m.group())) + '###' + m.group())\n if len(match_list) < true_idx + 1:\n print(\"Error!!!!!!!!!!!!!!!!!!!!!\")\n print(len(match_list))\n print(target_match)\n print(sentence_org)\n else:\n info_list = match_list[true_idx].split('###')\n target = info_list[2]\n from_idx = info_list[0]\n to_idx = str(int(from_idx) + int(info_list[1]))\n op = ET.Element('Opinion')\n op.set('target', target)\n op.set('category', \"\")\n op.set('polarity', \"\")\n op.set('from', from_idx)\n op.set('to', to_idx)\n current_opinions.append(op)\n\n if eval_task == 'TSD':\n P = Common_Num_exp / float(Pred_Num_exp) if Pred_Num_exp != 0 else 0\n R = Common_Num_exp / float(True_Num_exp)\n F = (2 * P * R) / float(P + R) if P != 0 else 0\n\n print('TSD task ignoring NULL:')\n print(\"\\tP: \", P, \" R: \", R, \" F1: \", F)\n print('----------------------------------------------------\\n\\n')\n\n P = Common_Num_imp / float(Pred_Num_imp) if Pred_Num_imp != 0 else 0\n R = Common_Num_imp / float(True_Num_imp)\n F = (2 * P * R) / float(P + R) if P != 0 else 0\n\n print('TSD task including NULL:')\n print(\"\\tP: \", P, \" R: \", R, \" F1: \", F)\n print('----------------------------------------------------\\n\\n')\n\n xml_string = ET.tostring(root)\n xml_write = DOM.parseString(xml_string)\n with open(f'evaluation_for_AD_TD_TAD/{eval_task}{run}_{dir_prefix}_sentence{pred_offset}.xml', 'w') as handle:\n xml_write.writexml(handle, indent=' ', encoding='utf-8')\n print(f\"\\n\\n\\n*******\\nGenarated target XML: {eval_task}{run}_{dir_prefix}_sentence{pred_offset}.xml'\\n*********\\n\\n\")\n\n\nmodel_args = {\n \"overwrite_output_dir\": True,\n \"max_seq_length\": 512,\n \"eval_batch_size\": 8,\n \"use_multiprocessing\": False,\n \"use_multiprocessing_for_evaluation\": False,\n \"use_multiprocessed_decoding\": False,\n \"num_beams\": None,\n \"do_sample\": True,\n \"max_length\": 512,\n \"top_k\": 50,\n \"top_p\": 0.95,\n \"num_return_sequences\": 3,\n}\n\n# Load the evaluation data\n\ndataset = sys.argv[1]\neval_task = sys.argv[2]\nphr_sen = \"\" if sys.argv[3] == 'sentence' else '_phrase'\nrun = sys.argv[4]\nmodel_size = \"base\"\ndir_prefix = f\"{dataset}{phr_sen}\"\nprint(f\"dataset: {dataset}\\ntask: {eval_task}\\nphr_sen: {phr_sen}\\nrun: {run}\")\n\ndf = pd.read_csv(f'data/{dataset}/test_{eval_task}{phr_sen}.csv')\n\ntasks = df[\"prefix\"].tolist()\n# analysis = False\nanalysis = True\n\nif 
not analysis:\n # Load the trained model\n # model = T5Model(\"t5\", \"outputs\", args=model_args)\n model = T5Model(\"t5\", f\"results/{eval_task}{run}_{dir_prefix}/\", args=model_args,\n use_cuda=False if not torch.cuda.is_available() else True)\n\n # Prepare the data for testing\n to_predict = [\n prefix + \": \" + str(input_text) for prefix, input_text in zip(df[\"prefix\"].tolist(), df[\"input_text\"].tolist())\n ]\n truth = df[\"target_text\"].tolist()\n\n # Get the model predictions\n preds = model.predict(to_predict)\n\n print(\"\\n\".join(preds[0]))\n\n with open(f'{eval_task}{run}_{dir_prefix}_truth.pkl', \"wb\") as f:\n pickle.dump(truth, f)\n with open(f'{eval_task}{run}_{dir_prefix}_preds.pkl', \"wb\") as f:\n pickle.dump(preds, f)\n\nelse:\n with open(f'{eval_task}{run}_{dir_prefix}_truth.pkl', \"rb\") as f:\n truth = pickle.load(f)\n with open(f'{eval_task}{run}_{dir_prefix}_preds.pkl', \"rb\") as f:\n preds = pickle.load(f)\n\n# Saving the predictions if needed\nconvert_pred_to_TAS_format(truth, preds, f\"evaluation_for_AD_TD_TAD/ABSA{15 if '15' in dataset else 16}_Restaurants_Test.xml\")\n\npreds = [pred[0] for pred in preds]\ndf[\"predicted\"] = preds\n\noutput_dict = {each_task: {\"truth\": [], \"preds\": [], } for each_task in tasks}\nprint(output_dict)\n\nresults_dict = {}\n\nfor task, truth_value, pred in zip(tasks, truth, preds):\n output_dict[task][\"truth\"].append(truth_value)\n output_dict[task][\"preds\"].append(pred)\n# print(output_dict)\n\nprint(\"-----------------------------------\")\nprint(\"Results: \")\nfor task, outputs in output_dict.items():\n if task == f\"Semeval {eval_task}\" or task == eval_task:\n try:\n task_truth = output_dict[task][\"truth\"]\n task_preds = output_dict[task][\"preds\"]\n # print(\"computing metrics\")\n results_dict[task] = {\n \"F1 Score\": f1(task_truth, task_preds) if (\n eval_task == \"Semeval AD\" or eval_task == \"Semeval ASD\") else \"Not Applicable\",\n \"Exact matches\": exact(task_truth, task_preds),\n }\n print(f\"Scores for {task}:\")\n print(\n f\"F1 score: {f1(task_truth, task_preds) if (eval_task == 'Semeval AD' or eval_task == 'Semeval ASD') else 'Not Applicable'}\")\n print(f\"Exact matches: {exact(task_truth, task_preds)}\")\n print()\n except Exception:\n pass\n\n# with open(f\"results/result_{datetime.now()}.json\", \"w\") as f:\n# json.dump(results_dict, f)\n","repo_name":"Sampreeth-sarma/T5-ABSA-Summarization","sub_path":"T5_SemEval_Test_TSD.py","file_name":"T5_SemEval_Test_TSD.py","file_ext":"py","file_size_in_byte":19029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"15105277801","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 25 05:51:38 2018\n\n@author: avinash\n\nPalindrome checker script using a Deque\n\n\"\"\"\n\nfrom deque import Deque\n\ndef isPalindrome(string):\n \n palDQ = Deque()\n \n result = True\n \n for ch in string:\n palDQ.addRear(ch)\n \n # compare characters pairwise from both ends; any mismatch\n # means the string cannot be a palindrome\n while palDQ.size() > 1:\n if palDQ.removeFront() != palDQ.removeRear():\n result = False\n break\n \n return result\n \n\nif __name__ == \"__main__\":\n \n string = \"malayala\"\n \n print(isPalindrome(string))\n","repo_name":"sir-avinash/coding-practice","sub_path":"problems/palindromeChecker.py","file_name":"palindromeChecker.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"6229587827","text":"# Surrounded Regions (Leetcode 130)\n\n# Given an m x n 
matrix containing \"X\" and \"O\", capture\n# all the regions that are 4-directionally surrounded by \"X\".\n\n# A region is captured by flipping all \"O\"'s into \"X\"'s in that\n# surrounded region.\n\n# Notice that an \"O\" can't be flipped if either of the following\n# is satisfied:\n# - It is on the border;\n# - It is adjacent to an \"O\" that should not be flipped.\n\nfrom collections import deque\n\nclass Solution:\n def solve(self, board):\n # Idea: iterate through the grid. Save all \"O\"'s in a set for easy access. \n # The set designates nodes to be flipped. Save all border \"O\"'s in a second\n # set: these are the possible starting points to find islands that should not\n # be flipped.\n # Perform BFS around \"O\"'s that are on the borders. Delete all\n # \"O\"'s belonging to these islands from the to_flip set (they should\n # not be flipped). \n # Flip all the \"O\"'s remaining in the set!\n # Yes, we do need extra space to store the two sets (both sets max out at size n*m),\n # but we gain an advantage in terms of time complexity since we never have to\n # look at \"X\" nodes during breadth-first search. Moreover, looking up an element\n # in a set or removing it are both O(1) time — very convenient.\n max_vert = len(board)\n max_hor = len(board[0])\n to_flip = set()\n on_borders = set()\n\n dirs = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n\n def bfs(r, c):\n\n if (r, c) not in to_flip:\n return\n \n q = deque([(r, c)])\n while q:\n row, col = q.popleft()\n if (row, col) in to_flip:\n to_flip.remove((row, col))\n for r_dir, c_dir in dirs:\n new_row, new_col = row + r_dir, col + c_dir\n q.append((new_row, new_col))\n\n for r in range(max_vert):\n for c in range(max_hor):\n if board[r][c] == \"O\":\n to_flip.add((r,c))\n if r == 0 or c == 0 or r == max_vert-1 or c == max_hor-1:\n on_borders.add((r, c))\n \n for r, c in on_borders:\n bfs(r, c)\n\n for r, c in to_flip:\n board[r][c] = \"X\"\n \n print(board)\n return\n\n # Where n and m are the grid dimensions,\n # - time complexity is O(n*m) to iterate through the whole grid +\n # perform manipulations with sets derived from the grid.\n # - space complexity is O(n*m). 
We need O(n*m) space for each of the\n # two sets and O(n*m) space for the queue used in breadth-first search.\n\nif __name__ == \"__main__\":\n my = Solution()\n print(my.solve([[\"X\",\"X\",\"X\",\"X\"],[\"X\",\"O\",\"O\",\"X\"],[\"X\",\"X\",\"O\",\"X\"],[\"X\",\"O\",\"X\",\"X\"]]))\n print(my.solve([[\"X\"]]))","repo_name":"kseniiako/dsa","sub_path":"surroundedRegions.py","file_name":"surroundedRegions.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"25682387383","text":"\"\"\"Convenience methods to retrieve meta data from .rtdc files\"\"\"\nimport functools\nimport pathlib\n\nimport dclab\n\nimport numpy as np\n\n\nclass dataset_monitoring_lru_cache:\n \"\"\"Decorator for caching RT-DC data extracted from DCOR or files\n\n This is a modification of dclab.util.file_monitoring_lru_cache\n with an exception that when the `path` starts with \"https://\",\n then caching is done as well.\n \"\"\"\n def __init__(self, maxsize=100):\n self.lru_cache = functools.lru_cache(maxsize=maxsize)\n self.cached_wrapper = None\n\n def __call__(self, func):\n @self.lru_cache\n def cached_wrapper(path, path_stats, *args, **kwargs):\n assert path_stats, \"We need stat for validating the cache\"\n return func(path, *args, **kwargs)\n\n @functools.wraps(func)\n def wrapper(path, *args, **kwargs):\n local_path = pathlib.Path(path)\n if local_path.exists():\n full_path = local_path.resolve()\n path_stat = full_path.stat()\n return cached_wrapper(\n path=full_path,\n path_stats=(path_stat.st_mtime_ns, path_stat.st_size),\n *args,\n **kwargs)\n elif isinstance(path, str) and path.startswith(\"https://\"):\n # DCOR metadata does not change\n return cached_wrapper(\n path=path,\n path_stats=\"placeholder\",\n *args,\n **kwargs)\n else:\n return func(path, *args, **kwargs)\n\n wrapper.cache_clear = cached_wrapper.cache_clear\n wrapper.cache_info = cached_wrapper.cache_info\n\n return wrapper\n\n\ndef get_info(path, section, key):\n config = get_rtdc_config(path)\n return config[section][key]\n\n\ndef get_repr(path, append_path=False):\n \"\"\"representative string of a dataset\"\"\"\n exp = get_rtdc_config(path)[\"experiment\"]\n rep = \"{} #{} ({} {})\".format(exp[\"sample\"],\n exp[\"run index\"],\n exp[\"date\"],\n exp[\"time\"])\n if append_path:\n rep += \"\\n{}\".format(path)\n return rep\n\n\n@dataset_monitoring_lru_cache(maxsize=100)\ndef get_rtdc_config(path):\n with dclab.new_dataset(path) as ds:\n config = ds.config.copy()\n return config\n\n\n@dataset_monitoring_lru_cache(maxsize=100)\ndef get_rtdc_features(path, scalar=True, only_loaded=False):\n \"\"\"Return available features in a dataset\"\"\"\n av_feat = []\n with dclab.new_dataset(path) as ds:\n if scalar:\n features = ds.features_scalar\n else:\n features = ds.features\n for feat in features:\n if only_loaded:\n if feat in ds.features_loaded:\n av_feat.append(feat)\n else:\n if feat in ds:\n av_feat.append(feat)\n return av_feat\n\n\ndef get_rtdc_features_bulk(paths, scalar=True):\n \"\"\"Return available features for a list of dataset paths\"\"\"\n features = []\n for pp in paths:\n features += get_rtdc_features(path=pp, scalar=scalar)\n return sorted(set(features))\n\n\n@dataset_monitoring_lru_cache(maxsize=10000)\ndef get_rtdc_features_minmax(path, *features):\n \"\"\"Return dict with min/max of scalar features in a dataset\"\"\"\n mmdict = {}\n with dclab.new_dataset(path) as ds:\n if len(features) == 0:\n features = ds.features_loaded\n 
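# Editor's note (added comment): when no explicit feature list is passed,
# fall back to every feature already loaded in the dataset; each scalar
# feature present is then reduced to its (min, max) tuple, and the
# decorator above caches the result per file/DCOR path.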
for feat in features:\n assert dclab.dfn.scalar_feature_exists(feat)\n if feat in ds:\n mmdict[feat] = np.min(ds[feat]), np.max(ds[feat])\n return mmdict\n\n\ndef get_rtdc_features_minmax_bulk(paths, features=None):\n \"\"\"Perform `get_rtdc_features_minmax` on a list of paths\n\n Parameters\n ----------\n paths: list of str or list of pathlib.Path\n Paths to measurement files\n features: list of str or empty list\n Names of the features to compute the min/max values for.\n If empty, all loaded features will be used.\n \"\"\"\n if features is None:\n features = []\n mmdict = {}\n for pp in paths:\n mmdi = get_rtdc_features_minmax(pp, *features)\n for feat in mmdi:\n if feat in mmdict:\n fmin = min(mmdict[feat][0], mmdi[feat][0])\n fmax = max(mmdict[feat][1], mmdi[feat][1])\n mmdict[feat] = (fmin, fmax)\n else:\n mmdict[feat] = mmdi[feat]\n return mmdict\n","repo_name":"ZELLMECHANIK-DRESDEN/ShapeOut2","sub_path":"shapeout2/meta_tool.py","file_name":"meta_tool.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"90"}
+{"seq_id":"12393260071","text":"# This file supports an Order-preserving encryption (ope).\n# Reference:\n# 1. http://www.cc.gatech.edu/~aboldyre/papers/bclo.pdf\nimport random\n\ndef getOPEKey(nLength = 2048):\n randFun = random.SystemRandom()\n n = None\n nLen = 0\n while (nLen != nLength):\n n = randFun.getrandbits(nLength)\n nLen = n.bit_length()\n return n\n","repo_name":"SocializedPolicyAdministration/SPAServer","sub_path":"KeyManager/Util/OPE.py","file_name":"OPE.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"}
+{"seq_id":"18894640452","text":"\"\"\"\n Django database models for Threads, Messages and Ratings\n\n classes\n Threads:\n A thread of messages between two users\n Messages:\n A message between two users in a thread\n Ratings:\n A rating given to a user by another user\n\n\"\"\"\n\nfrom django.db import models\nfrom pu.settings import AUTH_USER_MODEL\nfrom django.utils import timezone\n\n\nclass Thread(models.Model):\n \"\"\"\n A thread of messages between two users\n\n Attributes\n threadid:\n The ID of the thread\n user1:\n The first user in the thread\n user2:\n The second user in the thread\n\n Functions\n __str__: str\n toString returning the username of user1 and user2\n get_threadid: int\n Returning the id of the thread\n\n \"\"\"\n threadid = models.AutoField(db_column='threadID', primary_key=True) # Field name made lowercase.\n user1 = models.ForeignKey(AUTH_USER_MODEL, models.DO_NOTHING, db_column='user1', related_name=\"user_1\")\n user2 = models.ForeignKey(AUTH_USER_MODEL, models.DO_NOTHING, db_column='user2', related_name=\"user_2\")\n\n class Meta:\n managed = True\n db_table = 'thread'\n app_label = \"contact\"\n\n def __str__(self):\n \"\"\" Returns the username of user1 and user2 \"\"\"\n return self.user1.username + \" \" + self.user2.username\n\n def get_threadid(self):\n \"\"\" Returns the id of the thread \"\"\"\n return self.threadid\n\n\nclass Messages(models.Model):\n \"\"\"\n A message between two users in a thread\n\n Attributes\n messageid:\n The ID of the message\n message:\n The message\n sent:\n Timestamp of when the message was sent\n thread:\n The thread the message belongs to\n sentto:\n The user the message is sent to\n sentfrom:\n The user the message is sent from\n\n Functions\n __str__: str\n toString returning the message\n publish: None\n Sets sent 
to current time\n \"\"\"\n messageid = models.AutoField(db_column='messageID', primary_key=True) # Field name made lowercase.\n message = models.TextField()\n sent = models.DateTimeField(blank=True, null=True)\n thread = models.ForeignKey('Thread', models.DO_NOTHING, db_column='thread')\n sentto = models.ForeignKey(AUTH_USER_MODEL, models.DO_NOTHING, db_column='sentTo',\n related_name=\"message_sent_to\") # Field name made lowercase.\n sentfrom = models.ForeignKey(AUTH_USER_MODEL, models.DO_NOTHING, db_column='sentFrom',\n related_name=\"message_sent_from\") # Field name made lowercase.\n\n class Meta:\n managed = True\n db_table = 'messages'\n app_label = \"contact\"\n\n def __str__(self):\n \"\"\" Returns the message \"\"\"\n return self.message\n\n def publish(self):\n \"\"\" Sets sent to timezone.now(). Returns None\"\"\"\n self.sent = timezone.now()\n self.save()\n\n\nclass Ratings(models.Model):\n \"\"\"\n A rating given to a user by another user\n\n Attributes\n ratingid:\n The ID of the rating\n rated:\n The user being rated\n ratedby:\n The user who rated\n score:\n The score the rated was given\n \"\"\"\n ratingid = models.AutoField(db_column='ratingid', primary_key=True)\n rated = models.ForeignKey(AUTH_USER_MODEL, models.DO_NOTHING, db_column='rated', related_name=\"user_rated\")\n ratedby = models.ForeignKey(AUTH_USER_MODEL, models.DO_NOTHING, db_column='ratedBy',\n related_name=\"user_rated_by\") # Field name made lowercase.\n score = models.IntegerField()\n\n class Meta:\n managed = True\n db_table = 'ratings'\n app_label = \"contact\"\n","repo_name":"Mathipe98/TDT4140_project","sub_path":"src/contact/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"30527537303","text":"import torch\nfrom torch import nn\nimport torchvision\nimport os\nimport struct\nfrom torchsummary import summary\n\ndef main():\n print('cuda device count: ', torch.cuda.device_count())\n net = torch.load('../Pytorch-UNet/models/unet_carvana_scale1_epoch5.pth')\n\n f = open(\"unet.wts\", 'w')\n f.write(\"{}\\n\".format(len(list(net))))\n for k,v in net.items():\n print('key: ', k)\n print('value: ', v.shape)\n vr = v.reshape(-1).cpu().numpy()\n f.write(\"{} {}\".format(k, len(vr)))\n for vv in vr:\n f.write(\" \")\n f.write(struct.pack(\">f\", float(vv)).hex())\n f.write(\"\\n\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"nanmi/xnet-trt-assets","sub_path":"unet/gen_wts.py","file_name":"gen_wts.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"} +{"seq_id":"17606768055","text":"import context\n\nfrom torch.utils.data import DataLoader\n\nimport lantern.models as models\nimport lantern.models.activations as activations\nfrom lantern.utils.visualization import ImageFittingSummaryWriter\n\nimport lantern.losses as losses\nimport lantern.datasets as datasets\nimport lantern.training as training\n\nimport configargparse\n\np = configargparse.ArgumentParser()\n\np.add_argument('--id', type=str, required=False,\n help='Identifier for this experiment',\n default='img_fitting')\n\np.add_argument('--img_path', type=str, required=False,\n help='Image to be used for training',\n default='./data/image/mug.jpeg')\n\nif __name__ == '__main__':\n\n args = p.parse_args()\n\n resolution = (96, 128)\n\n # Load dataset\n dataset = datasets.SingleImageDataset(\n filename=args.img_path, 
sidelength=resolution)\n\n dataloader = DataLoader(dataset, shuffle=True,\n batch_size=1, pin_memory=True, num_workers=0)\n\n input, output = dataset[0]\n\n # import matplotlib.pyplot as plt\n # plt.imshow(output['intensities'].reshape((*resolution, 3)))\n # plt.show()\n\n # sys.exit()\n\n # Select model\n model = models.FullyConnectedBlock(\n in_features=1, out_features=1,\n num_hidden_layers=0, hidden_features=2,\n activation=activations.Sine(),\n parse_input_dict_fn=lambda input_dict: input_dict['coordinates'],\n build_output_dict_fn=lambda input, output: {\n 'coordinates': input,\n 'intensities': output\n },\n )\n\n # Train model\n trainer = training.ModelTrainer(\n model,\n dataloader,\n output_prefix=args.id,\n summary_writer_class=ImageFittingSummaryWriter,\n ).train(\n learning_rate=0.01,\n loss_fn=losses.image_mse_loss,\n steps_until_summary=5,\n num_epochs=1000,\n )\n","repo_name":"edufschmidt/neural-implicit-models-sandbox","sub_path":"scripts/image.train.py","file_name":"image.train.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"18237400748","text":"import numpy as np\nimport argparse\nimport os \nimport errno\n\ntry:\n os.mkdir('metrics/average_precision/')\nexcept OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n pass\n\nparser = argparse.ArgumentParser(\n description='Compute Average Precision Per Class')\nparser.add_argument('--conf_mat_path', type=str)\nparser.add_argument('--fog_intensity', type=str)\nargs = parser.parse_args()\nconf_mat = np.loadtxt(args.conf_mat_path, dtype=float, delimiter=',')\n\nclass_ap = np.zeros((conf_mat.shape[0], ))\nclass_ids = ['road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation',\n 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motor cycle', 'bicycle', 'unlabeled']\n\nf_ = open('metrics/average_precision/avg_precision_fog_intensity_' + args.fog_intensity + '.txt', 'w+')\nfor i in range(conf_mat.shape[0]):\n class_ap[i] = conf_mat[i, i]\n row_sum = np.sum(conf_mat[i])\n class_ap[i] /= row_sum\n f_.write(class_ids[i] + \" \" + str(class_ap[i]) + '\\n')\n\nf_.close()","repo_name":"karan2808/Semantic-Segmentation-Models","sub_path":"metrics/compute_ap.py","file_name":"compute_ap.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"90"} +{"seq_id":"9275273007","text":"\"\"\"\nSecond module: data analysis.\nD. Cordier, CNRS? 
France, January 2023\nhttps://orcid.org/0000-0003-4515-6271\nLicence: GPLv3\n\"\"\"\n# -----------------------------------------------------------------------------------------------------------------------------------\n#\n# Python module of data extraction from VIMS cube for IR photometric uncertainties\n#\n# -----------------------------------------------------------------------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nfrom pyvims import VIMS\n\nimport matplotlib.colors as colors\n\nfrom matplotlib.patches import Rectangle\n\nfrom matplotlib.ticker import AutoMinorLocator\n\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef plot_boxes_map(bg, my_lon, my_lat, figname):\n \"\"\"\n Plot the location of 3x3 pixels boxes over a map of Titan's surface.\n Inputs:\n - bg (image) ---------------: image of the background map.\n - my_lon (numpy array) -----: longitudes.\n - my_lat (numpy array) -----: latitudes.\n - figname (string) ---------: name of the PNG, PDF, ... file in which the figure is saved.\n \"\"\"\n fig, ax = plt.subplots(figsize=(15*1.3, 6*1.3))\n\n ax.imshow(bg, extent=[360, 0, -90, 90], cmap='gray')\n\n mes_lon = np.reshape(my_lon, (my_lon.shape[0]))\n mes_lat = np.reshape(my_lat, (my_lat.shape[0]))\n\n ax.set_xlabel('Longitude (°)')\n ax.set_ylabel('Latitude (°)')\n\n ax.set_xticks(np.arange(0, 361, 30))\n ax.set_yticks(np.arange(-90, 91, 30))\n\n ax.set_xticklabels([f'{lon}°W' for lon in np.arange(0, 361, 30)]);\n ax.set_yticklabels([f'{lat}°N' if lat > 0 else f'{-lat}°S' if lat < 0 else 'Eq.' for lat in np.arange(-90, 91, 30)])\n\n hmin= 0\n hmax= 50\n\n h = ax.hist2d(mes_lon, mes_lat, bins=[360, 180], cmap='winter', \\\n norm=colors.PowerNorm(gamma=1. / 5.), cmin=0.001, cmax=hmax)\n\n # Huygens landing site\n lon_Huyg = 191\n lat_Huyg = -10.6 # latitude, deg N (negative = south)\n ax.scatter(lon_Huyg, lat_Huyg, s=80, marker='s', c='red')\n\n # Selk crater (Dragonfly landing site)\n lon_Selk = 199\n lat_Selk = +7 # latitude, deg N\n ax.scatter(lon_Selk, lat_Selk, s=80, c='gold', marker='s')\n\n ax.set_xlim(360, 0)\n cbar = fig.colorbar(h[3], ax=ax)\n ax.grid('grey')\n cbar.set_label('Density of 3x3 px boxes (Nbr box per degree$^2$)')\n\n fig.savefig(figname)\n\n return\n
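# Editor's note: an added, illustrative usage sketch for plot_boxes_map().
# The map file name and the random coordinates are made-up placeholders,
# not data from the original study.
def _example_plot_boxes_map_usage(map_png='titan_map.png'):
    import matplotlib.image as mpimg
    bg = mpimg.imread(map_png)                       # hypothetical background map
    lon = np.random.uniform(0.0, 360.0, (500, 1))    # fake box longitudes (deg W)
    lat = np.random.uniform(-90.0, 90.0, (500, 1))   # fake box latitudes (deg N)
    plot_boxes_map(bg, lon, lat, 'boxes_density.png')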
\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef VIMS_band (i0, i1):\n \"\"\"\n Build the lists of keywords identifying the columns of a DataFrame holding\n the relative uncertainty and the mean I/F of the 3x3 pixel boxes.\n inputs:\n i0 (int): index of the first VIMS channel to consider.\n i1 (int): index of the last VIMS channel to consider.\n outputs:\n list_DIsF : list of strings (the keywords) for the uncertainty.\n list_IsFav : same, for the mean I/F.\n \"\"\"\n list_DIsF = []\n list_IsFav = []\n for i in range(i0, i1+1):\n key_i = 'DIsF_'+str(i)\n list_DIsF.append(key_i)\n key_i = 'IFav_'+str(i)\n list_IsFav.append(key_i)\n\n return list_DIsF, list_IsFav\n\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef concat_VimsChan (DF, i0, i1):\n \"\"\"\n Extract Numpy arrays holding the box data, concatenated over bands of VIMS channels.\n inputs:\n DF: the Pandas DataFrame with the box data.\n i0 (int): index of the first VIMS channel to consider.\n i1 (int): index of the last VIMS channel to consider.\n outputs:\n IsFav_band: Numpy array with the mean I/F over the band defined by i0 and i1.\n DIsF_band: Numpy array with the uncertainties over the band defined by i0 and i1.\n \"\"\"\n list_DIsF, list_IsFav = VIMS_band (i0, i1)\n\n DIsF_band = DF[list_DIsF[0]].to_numpy()\n IsFav_band = DF[list_IsFav[0]].to_numpy()\n\n for disf in list_DIsF[1:]:\n npDIsF = DF[disf].to_numpy()\n DIsF_band = np.append(DIsF_band, npDIsF)\n\n for isfav in list_IsFav[1:]:\n npIsFav = DF[isfav].to_numpy()\n IsFav_band = np.append(IsFav_band, npIsFav)\n\n return IsFav_band, DIsF_band\n\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef concat_DIsF_expo (DF_pix, DF_cube, i0, i1):\n \"\"\"\n Extract the relative photometric errors as a function of the cube exposure times.\n Inputs:\n - DF_pix ----: the DataFrame holding the data on the 3x3 pixel boxes.\n - DF_cube ---: the DataFrame holding the data on the cubes themselves.\n - i0 --------: index of the first VIMS channel of the spectral band considered.\n - i1 --------: index of the last VIMS channel of the spectral band considered.\n Outputs:\n - DIsF_moy --: Numpy array of the mean relative errors of the 3x3 pixel boxes,\n the mean being taken over the band defined by (i0, i1), for all\n 3x3 boxes.\n - expo_time -: the corresponding (cube) exposure times.\n \"\"\"\n list_DIsF = VIMS_band (i0, i1) # retrieve the keywords defining the VIMS channels\n # we work on.\n DIsF_moy = np.array([])\n expo_time = np.array([])\n\n cub_list = DF_cube[\"Cube name\"].to_list() # retrieve the list of identifiers of the VIMS\n # cubes we work on.\n for cn in cub_list:\n DIsF_band = DF_pix[list_DIsF[0]][DF_pix['Cube name'] == cn].to_numpy()\n exp_t = float(DF_cube[DF_cube['Cube name'] == cn]['Expo Time'])\n\n for dband in DIsF_band:\n m = np.mean(dband)\n #print (dband, m)\n DIsF_moy = np.append(DIsF_moy, m)\n expo_time = np.append(expo_time, exp_t)\n\n return DIsF_moy, expo_time\n
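# Editor's note: an added, equivalent vectorised form of the concatenation
# performed by concat_VimsChan() above. Selecting all band columns at once
# and flattening column-major reproduces the same channel-by-channel order
# while avoiding repeated np.append() reallocations. A sketch assuming the
# same DataFrame layout:
def concat_VimsChan_fast(DF, i0, i1):
    list_DIsF, list_IsFav = VIMS_band(i0, i1)
    return (DF[list_IsFav].to_numpy().ravel(order='F'),
            DF[list_DIsF].to_numpy().ravel(order='F'))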
\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef concat_VimsChan_lowAngDis (DF0, i0, i1, Dang):\n \"\"\"\n Same as 'concat_VimsChan', except that a condition is applied to the relative\n standard deviations of the angles: Dphase, Dinc, Deme; the values of these\n \"D***\" must be smaller than 'Dang'.\n\n inputs:\n DF0 -----------: the Pandas DataFrame with the box data.\n i0 (int) ------: index of the first VIMS channel to consider.\n i1 (int) ------: index of the last VIMS channel to consider.\n Dang (float) --: maximum value of the _relative_ standard deviations of the angles.\n\n outputs:\n IsFav_band ----: Numpy array with the mean I/F over the band defined by i0 and i1.\n DIsF_band -----: Numpy array with the uncertainties over the band defined by i0 and i1.\n \"\"\"\n\n list_DIsF, list_IsFav = VIMS_band (i0, i1)\n\n DF = DF0[(DF0['Dphase'] < Dang) & (DF0['Dinc'] < Dang) & (DF0['Deme'] < Dang)]\n\n DIsF_band = DF[list_DIsF[0]].to_numpy()\n IsFav_band = DF[list_IsFav[0]].to_numpy()\n\n for disf in list_DIsF[1:]:\n npDIsF = DF[disf].to_numpy()\n DIsF_band = np.append(DIsF_band, npDIsF)\n\n for isfav in list_IsFav[1:]:\n npIsFav = DF[isfav].to_numpy()\n IsFav_band = np.append(IsFav_band, npIsFav)\n\n return IsFav_band, DIsF_band\n\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef rm_NaN_Inf_nega (arrIsF, arrDIsF):\n \"\"\"\n Remove the NaN, +/-Inf and negative I/F values from the 'IsF_av' and 'DIsF' arrays;\n when an element is removed from one array, the corresponding element of the other\n array is removed as well (even if it is neither NaN nor +/-Inf), which keeps the\n two arrays the same length.\n Inputs:\n arrIsF: Numpy array of the IsF_av of the 3x3 pixel boxes.\n arrDIsF: Numpy array of the relative uncertainties of the 3x3 boxes.\n Outputs:\n IsF_clean: Numpy array without the NaN and +/-Inf.\n DIsF_clean: Idem.\n \"\"\"\n dataF = pd.DataFrame({'IsF_av': arrIsF, 'DIsF': arrDIsF})\n dataF.replace([np.inf, -np.inf], np.nan, inplace=True)\n dataF.dropna(subset = ['IsF_av'], inplace=True)\n dataF.dropna(subset = ['DIsF'], inplace=True)\n\n #indexNames = dataF[dataF['IsF_av'] <= 0 ].index # remove the negative values.\n #dataF.drop(indexNames , inplace=True)\n #\n #indexNames = dataF[dataF['DIsF'] <= 0 ].index\n #dataF.drop(indexNames , inplace=True)\n\n dataF = dataF[dataF['IsF_av']>0]\n\n IsF_clean = dataF['IsF_av'].to_numpy()\n DIsF_clean = dataF['DIsF'].to_numpy()\n return IsF_clean, DIsF_clean\n
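# Editor's note: an added smoke test showing that rm_NaN_Inf_nega() keeps
# the two arrays aligned: of the five pairs below only (0.1, 1.0) survives,
# since NaN, +/-Inf and the negative I/F are all dropped pairwise.
# Illustrative only; not part of the original module.
def _example_rm_NaN_Inf_nega():
    isf = np.array([0.1, np.nan, -0.2, 0.3, np.inf])
    disf = np.array([1.0, 2.0, 3.0, np.nan, 5.0])
    isf_clean, disf_clean = rm_NaN_Inf_nega(isf, disf)
    assert list(isf_clean) == [0.1] and list(disf_clean) == [1.0]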
\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef IsFavBand(Pav_DF, band, Dang):\n \"\"\"\n Inputs:\n - Pav_DF (Pandas DataFrame) ----: DataFrame containing data of 3x3 boxes extracted from VIMS cubes.\n - band (list) ------------------: list specifying the properties of spectral bands used for the work.\n - Dang (float) -----------------: maximum relative standard deviation of angles between pixels in\n a given 3x3 box.\n Outputs:\n - IsFav_band_Da (list of Numpy array) --: average I/F for each band, for all 3x3 pixels boxes.\n - DIsF_band_Da (list of Numpy array) ---: relative standard deviation of I/F for each band, for\n all 3x3 pixels boxes.\n \"\"\"\n nbr_band = len(band)\n IsFav_band_Da = [np.array([])]*nbr_band\n DIsF_band_Da = [np.array([])]*nbr_band\n\n for i in range(nbr_band):\n IsFav_band_Da[i], DIsF_band_Da[i] = concat_VimsChan_lowAngDis (Pav_DF, band[i][0], band[i][1], Dang)\n\n k=5\n #print (len(IsFav_band_Da[k]))\n\n # Cleaning up: we remove all the NaN and Inf present within the data:\n for i in range(nbr_band):\n IsFav_band_Da[i], DIsF_band_Da[i] = rm_NaN_Inf_nega (IsFav_band_Da[i], DIsF_band_Da[i])\n\n #print (len(IsFav_band_Da[k]))\n\n return IsFav_band_Da, DIsF_band_Da\n\n #\n
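# Editor's note: an added illustration of the `band` argument consumed by
# IsFavBand() above and plot_band_avIF_DIF() below: a list of
# (first VIMS channel, last VIMS channel, matplotlib colour) triples. The
# channel indices here are placeholders, not the band definitions used in
# the original analysis.
_example_bands = [
    (10, 25, 'tab:blue'),
    (30, 45, 'tab:orange'),
    (50, 65, 'tab:green'),
    (70, 85, 'tab:red'),
    (90, 105, 'tab:purple'),
    (110, 125, 'tab:brown'),
]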
\n# -----------------------------------------------------------------------------------------------------------------------------------\ndef plot_band_avIF_DIF(band, cubes_dir, cname, IsFav_band, DIsF_band, figname):\n \"\"\"\n Inputs:\n # - nbr_band (int) ----------------------: number of spectral bands considered.\n - band (list) -------------------------: contains the specification of employed spectral bands.\n - cubes_dir (string) ------------------: name of the VIMS cubes directory.\n - cname (string) ----------------------: name of an example cube (currently unused; the reference\n cube id is hard-coded below).\n - IsFav_band (list of numpy arrays) ---: average I/F for each band, for all 3x3 pixels boxes.\n - DIsF_band (list of numpy arrays) ----: relative standard deviation of I/F for each band, for\n all 3x3 pixels boxes.\n - figname (string) --------------------: name of the PDF file in which the figure is saved.\n \"\"\"\n # ---------------------------------------------------------------------------\n fig, (ax0, ax1) = plt.subplots(2, 1, figsize=(15, 10), tight_layout=True)\n # ---------------------------------------------------------------------------\n\n nbr_band = len(band)\n cub_VIMS = VIMS('1732876622_1', root=cubes_dir)\n px = [3, 4]\n cann_lambda = cub_VIMS.wvlns\n spectre = cub_VIMS[px].spectrum\n\n BANDS = {\n 1: (band[0][0], band[0][1], band[0][2]),\n 2: (band[1][0], band[1][1], band[1][2]),\n 3: (band[2][0], band[2][1], band[2][2]),\n 4: (band[3][0], band[3][1], band[3][2]),\n 5: (band[4][0], band[4][1], band[4][2]),\n 6: (band[5][0], band[5][1], band[5][2]),\n }\n #\n\n minor_locator = AutoMinorLocator(10)\n ax0.xaxis.set_minor_locator(minor_locator)\n minor_locator = AutoMinorLocator(5)\n ax1.xaxis.set_minor_locator(minor_locator)\n\n ax0.grid(True)\n ax0.set_ylim(-0.02, 0.20)\n\n ax0.set_xlabel('Wavelength (µm)')\n ax0.set_ylabel(r'$I/F$');\n\n ax0.xaxis.get_label().set_fontsize(20)\n ax0.yaxis.get_label().set_fontsize(20)\n ax0.tick_params(axis='x', labelsize=14)\n ax0.tick_params(axis='y', labelsize=14)\n\n #ax0.plot(cann, spectre, color='lightsteelblue')\n ax0.plot(cann_lambda, spectre, color='steelblue')\n ax0.scatter(cann_lambda, spectre, color='steelblue', s=10)\n\n for i, (b0, b1, color) in BANDS.items():\n w0, w1 = cub_VIMS.wvlns[b0], cub_VIMS.wvlns[b1]\n\n print(f'Band {i}: {b0}-{b1} | {w0:.3f}-{w1:.3f} µm')\n\n ax0.add_patch(Rectangle((w0, -0.005), w1 - w0, 0.1, edgecolor=color, facecolor='none'))\n ax0.text((w1 + w0) / 2, 0.105, i, color=color, va='center', ha='center')\n\n ax0.set_ylim(-0.005, None)\n\n # ---------------------------------------------------------------------------------------\n #\n ax1.xaxis.get_label().set_fontsize(20)\n ax1.yaxis.get_label().set_fontsize(20)\n ax1.grid(True)\n ax1.set_xlim(-0.02, 0.30)\n ax1.set_ylim(-3.2, 0.10)\n ax1.set(xlabel='Average $I/F$ over each 3x3 block', ylabel=r'$I/F$ standard deviation')\n #\n\n malpha = 0.1\n\n for i in range(nbr_band):\n ax1.scatter(IsFav_band[i], DIsF_band[i], color=band[i][2], s=2, marker='.', alpha=malpha)\n #\n # ---------------------------------------------------------------------------\n # Save the figure to a file:\n fig.savefig(figname, dpi=300, facecolor='w', edgecolor='w',\n orientation='landscape')\n\n return\n","repo_name":"dcordiercnrs/VIMS-IR_uncertainties","sub_path":"VIMSU_2.py","file_name":"VIMSU_2.py","file_ext":"py","file_size_in_byte":13832,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"18109359589","text":"s = []\ne = input().split()\n\nfor c in e:\n if c in \"+-*\":\n a = s.pop()\n b = s.pop()\n s.append(str(eval(b + c + a)))\n else:\n s.append(c)\n\nprint(s[0])\n\n","repo_name":"Aasthaengg/IBMdataset","sub_path":"Python_codes/p02263/s923521774.py","file_name":"s923521774.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"}
+{"seq_id":"33502091040","text":"\r\nimport pygame as pg\r\ndef play_music(music_file):\r\n\r\n clock = pg.time.Clock()\r\n try:\r\n pg.mixer.music.load(music_file)\r\n print(\"Music file {} loaded!\".format(music_file))\r\n except pg.error:\r\n print(\"File {} not found! {}\".format(music_file, pg.get_error()))\r\n return\r\n pg.mixer.music.play()\r\n\r\n while pg.mixer.music.get_busy():\r\n clock.tick(30)\r\n\r\nmusic_file = \"a2.mid\" # midi file saved by pywork1\r\n\r\nfreq = 44100 \r\nbitsize = -16 \r\nchannels = 2 \r\nbuffer = 2048 \r\npg.mixer.init(freq, bitsize, channels, buffer)\r\n\r\npg.mixer.music.set_volume(0.8)\r\ntry:\r\n play_music(music_file)\r\nexcept KeyboardInterrupt:\r\n\r\n pg.mixer.music.fadeout(1000)\r\n pg.mixer.music.stop()\r\n raise SystemExit","repo_name":"William-Bruise/python_lemon","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"90"}
+{"seq_id":"35656853510","text":"\"\"\"\nQuery azure resources and retrieve random starting structures, stored in containers.\n\nThe script is read-only and meant to test the access to tables.\n\"\"\"\n\nimport argparse\nimport dotenv\nimport os\nimport sys\n\n# Project root\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../'))\n\nfrom singleturn.singleturn_games_storage import SingleTurnGameStorage # noqa: E402\nfrom common import utils, logger # noqa: E402\n\n# Load dotenv before project imports\ndotenv.load_dotenv(\n os.path.join(os.path.dirname(__file__), '..', '.env'))\n\n_LOGGER = logger.get_logger(__name__)\n\n\ndef read_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--game_count',\n help='Total number games to query',\n type=int,\n default=30,\n )\n parser.add_argument(\n '--config',\n help='Environment to use for operations',\n choices=['production', 'sandbox'],\n default='sandbox',\n )\n return parser.parse_args()\n\n\ndef main():\n args = read_args()\n connection_str = os.getenv('AZURE_STORAGE_CONNECTION_STRING')\n config = utils.read_config(args.config, '../env_configs.json')\n\n with SingleTurnGameStorage(\n config['hits_table_name'], connection_str, config['starting_structures_container_name'],\n config['starting_structures_blob_prefix']) as game_storage:\n\n random_seed_structures = game_storage.select_start_worlds_ids(game_count=10)\n if len(random_seed_structures) != 10:\n _LOGGER.error(\"Error retrieving initial structure ids\")\n else:\n _LOGGER.info(f\"{len(random_seed_structures)} initial structure ids correctly restored \"\n \"from azure tables.\")\n
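# Editor's note (added comment): second read-only probe; the query above
# exercised the Azure table storage, while reading the last game index
# below exercises the blob container.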
_LOGGER.info(f\"{len(random_seed_structures)} initial structure ids correctly restored \"\n \"from azure tables.\")\n\n last_game_index = game_storage.get_last_game_index()\n if last_game_index == 0:\n _LOGGER.error(\"Error retrieving data from container\")\n else:\n _LOGGER.info(f\"Last game id {last_game_index} read from container\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"iglu-contest/iglu-data-collection-tool","sub_path":"mturk_scripts/singleturn/tests/ping_azure_containers.py","file_name":"ping_azure_containers.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"90"} +{"seq_id":"22221828744","text":"# fonction qui recupere les articles\r\nfrom pyVinted import Vinted\r\nfrom urllib import request\r\nimport json\r\n\r\nNB_ARTICLES = 1\r\n\r\n\r\ndef get_products(URL, nb):\r\n vinted = Vinted()\r\n items = vinted.items.search(URL, nb, 1)\r\n for item in items:\r\n print(item.title, \":\", item.price + \"€\", \":\", item.url)\r\n return items\r\n\r\n\r\ndef articles_to_json(art):\r\n obj1 = str(art[0].__dict__)\r\n new = obj1.replace(\"\\'\", \"\\\"\")\r\n new = new.replace(\"None\", '0')\r\n new = new.replace(\"False\", '0')\r\n new = new.replace(\"True\", '1')\r\n new = new.replace(\"datetime.datetime(\", '[')\r\n new = new.replace(\"), \\\"raw_timestamp\\\":\", '], \\\"raw_timestamp\\\":')\r\n new = new.replace(\"tzinfo=datetime.timezone.utc\", '0')\r\n new = new.replace('»', ' ')\r\n new = new.replace('«', ' ')\r\n new = new.replace(\"Levi's\", 'Levis')\r\n new = new.replace(\"Jean's\", 'Jean')\r\n\r\n # écrire dans le data.json\r\n jsonFile = open(\"data.json\", \"w\")\r\n jsonFile.write(new)\r\n jsonFile.close()\r\n\r\n\r\n# filtre les données utiles à envoyer\r\ndef filtre_json():\r\n with open('data.json') as mon_fichier:\r\n data = json.load(mon_fichier)\r\n # check if article is new\r\n mon_id = data[\"raw_data\"][\"id\"]\r\n\r\n file = open(\"dataexist.txt\", \"r\")\r\n if str(mon_id) in file.read():\r\n print(\"----> Article deja existant\")\r\n else:\r\n with open('dataexist.txt', 'a') as file2:\r\n file2.write(str(mon_id) + \"\\n\")\r\n new_data = data[\"raw_data\"][\"title\"], data[\"raw_data\"][\"price\"] + \"€\", data[\"raw_data\"][\"url\"]\r\n return new_data\r\n\r\n\r\ndef envoie_discord(URL_VETEMENT, WEBHOOK_URL):\r\n articles = get_products(URL_VETEMENT, NB_ARTICLES)\r\n # création de la liste d'articles json a partir des articles -> data.json\r\n articles_to_json(articles)\r\n article_envoi = str(filtre_json())\r\n if article_envoi != 'None':\r\n # donnees utiles pour envoi sur discord\r\n payload = {\r\n 'embeds': [\r\n {\r\n 'title': articles[0].title,\r\n 'description': \"📏 | Taille: \" + articles[0].size_title + \\\r\n \"\\n\\n\" + \\\r\n \"💶 | Prix: \" + articles[0].price + \"€\\n\",\r\n 'url': articles[0].url,\r\n \"image\": {\r\n \"url\": articles[0].photo\r\n },\r\n 'author': {'name': 'Premium Vinted Bot', 'icon_url': 'https://img1.freepng.fr/20180320/fbe/kisspng-computer-icons-social-media-robot-scalable-vector-drawing-vector-robot-5ab16e73a00f11.3357462015215775876556.jpg'},\r\n },\r\n ],\r\n }\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'user-agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'\r\n }\r\n req = request.Request(url=WEBHOOK_URL,\r\n data=json.dumps(payload).encode('utf-8'),\r\n headers=headers,\r\n method='POST')\r\n # envoyer la request a discord\r\n request.urlopen(req)\r\n print(\"Article 
envoyé\")\r\n","repo_name":"Thackos/VPS1","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"90"} +{"seq_id":"38061541417","text":"import pandas as pd\nimport requests\nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\nimport flask\n\n\ndef scrape():\n scrape_dict = {}\n\n # Open Browser\n executable_path = {'executable_path': '/usr/local/bin/chromedriver'}\n browser = Browser(\n 'chrome', **executable_path, headless=False)\n\n # NASA Mars News\n nasa_url = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest\"\n browser.visit(nasa_url)\n soup_nasa = bs(browser.html)\n articles_dict = {\"Title\": [], \"Paragraph\": []}\n articles = soup_nasa.find_all(\"li\", class_=\"slide\")\n for article in articles:\n articles_dict[\"Title\"].append(\n article.find('div', class_=\"content_title\").text)\n articles_dict[\"Paragraph\"].append(article.find(\n 'div', class_=\"article_teaser_body\").text)\n scrape_dict[\"Mars_News\"] = articles_dict\n\n # JPL Mars Space Images - Featured Image\n jpl_img_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(jpl_img_url)\n soup_jpl = bs(browser.html)\n start_url = 'https://www.jpl.nasa.gov'\n featured_image_url = start_url + \\\n soup_jpl.find(\"a\", class_=\"button\")[\"data-fancybox-href\"]\n scrape_dict[\"Featured_Image\"] = featured_image_url\n\n # Mars Weather\n weather_url = \"https://twitter.com/marswxreport?lang=en\"\n browser.visit(weather_url)\n soup_weather = bs(browser.html)\n tweet = soup_weather.find('div', class_='content').find('p')\n unwanted = tweet.find('a')\n unwanted.extract()\n scrape_dict[\"Weather\"] = tweet.text\n\n # Mars Facts\n facts_url = \"https://space-facts.com/mars/\"\n facts_df = pd.read_html(facts_url)[0]\n facts_df = facts_df.rename(columns={0: \"description\", 1: \"value\"})\n facts_df = facts_df.set_index(\"description\")\n scrape_dict[\"Mars_Facts\"] = facts_df.to_html()\n\n # Mars Hemispheres\n astrogeology_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n browser.visit(astrogeology_url)\n soup_astro = bs(browser.html)\n imgs = soup_astro.find_all(\"div\", class_=\"item\")\n hemisphere_hrefs = []\n for div in imgs:\n hemisphere_hrefs.append(div.find('a')['href'])\n hemisphere_image_urls = []\n start_url = \"https://astrogeology.usgs.gov\"\n for href in hemisphere_hrefs:\n url = start_url + href\n browser.visit(url)\n soup_hemisphere = bs(browser.html)\n hemisphere = {\"title\": soup_hemisphere.find('h2').text.strip(\" Enhanced\"),\n \"img_url\": soup_hemisphere.find('div', class_=\"downloads\").find(\"a\")[\"href\"]}\n hemisphere_image_urls.append(hemisphere)\n scrape_dict[\"Hemisphere\"] = hemisphere_image_urls\n\n browser.quit()\n\n return scrape_dict\n","repo_name":"diana-md/Data-Analytics-Bootcamp-Projects","sub_path":"10.WebScraping/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"20612450230","text":"from training.data.textnoisifier import TextNoisifier\nfrom multiprocessing import Pool\n\n\ndef noisify(text):\n \"\"\"\n function wrapper to be fed on Pool.map()\n :param text:\n :return noisy text:\n \"\"\"\n return ntg.noisify(text)\n\n\ndef 
+{"seq_id":"20612450230","text":"from training.data.textnoisifier import TextNoisifier\nfrom multiprocessing import Pool\n\n\ndef noisify(text):\n \"\"\"\n Function wrapper to be fed to Pool.map().\n :param text:\n :return noisy text:\n \"\"\"\n return ntg.noisify(text)\n\n\ndef csv_to_dict(file):\n d = {}\n with open(file, 'r') as f:\n rows = f.read().split('\\n')\n for row in rows:\n k, v = row.split(',')\n d.update({k: v})\n return d\n\n\naccent_dict = csv_to_dict('training/data/common_accented_words.txt')\ncontract_dict = csv_to_dict('training/data/common_contracted_words.txt')\nphonetic_dict = csv_to_dict(\n 'training/data/common_phonetically_styled_words.txt')\nexpansion_dict = {v: k for k, v in contract_dict.items()}\n\nwith open('training/data/hyph_fil.tex', 'r') as f:\n hyphenator_dict = f.read()\n\nntg = TextNoisifier(accent_dict,\n phonetic_dict,\n contract_dict,\n expansion_dict,\n hyphenator_dict)\n\n\ndef main():\n clean_sentence = \"\"\"Aalis ka ba pag ikaw ang bida ako ang bida.\n Di ko na kaya pang umasa sa wala , ang diyan oo na mahal na kung mahal kita\n , ganoon pala yun gusto ko ayaw ko at gusto ko bahala kayo\"\"\"\n\n clean_sentence = ntg.expansion(clean_sentence)\n clean_sentence = ntg.expandable_expr.sub(ntg.word_expansion,\n clean_sentence)\n print(clean_sentence)\n noisy_sentence = ntg.contraction(clean_sentence)\n\n noisy_sentence = ntg.contractable_expr.sub(\n ntg.word_contraction, noisy_sentence)\n\n noisy_sentence = ntg.anable_expr.sub(\n ntg.word_ang_to_an, noisy_sentence)\n\n noisy_sentence = ntg.anu_expr.sub(\n ntg.word_ano, noisy_sentence)\n\n noisy_sentence = ntg.amable_expr.sub(\n ntg.word_ang_to_am, noisy_sentence)\n\n noisy_sentence = ntg.remove_space_expr.sub(\n ntg.word_remove_space, noisy_sentence)\n\n p = Pool()\n\n noisy_sentence = ' '.join(p.map(\n noisify, noisy_sentence.split()))\n\n print(noisy_sentence)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ryangmolina/text-normalization","sub_path":"noisifier.py","file_name":"noisifier.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"32095317785","text":"import gi\n\ngi.require_version(namespace='Gtk', version='4.0')\ngi.require_version(namespace='Adw', version='1')\nfrom gi.repository import Gtk, Adw\n\nAdw.init()\n\n\nclass ConverteUnidadeMedidaListBox():\n def __init__(self):\n pass\n\n def desenhar_listbox(self):\n vbox_listbox = Gtk.Box.new(orientation=Gtk.Orientation.VERTICAL, spacing=0)\n vbox_listbox.get_style_context().add_class(class_name='card')\n\n scrolledwindow = Gtk.ScrolledWindow.new()\n scrolledwindow.set_propagate_natural_height(True)\n vbox_listbox.append(child=scrolledwindow)\n\n listbox = Gtk.ListBox.new()\n # Set the selection mode.\n listbox.set_selection_mode(mode=Gtk.SelectionMode.NONE)\n scrolledwindow.set_child(child=listbox)\n\n for n in range(1, 10):\n row = Gtk.ListBoxRow.new()\n row.set_selectable(selectable=False)\n\n vbox_card = Gtk.Box.new(orientation=Gtk.Orientation.VERTICAL, spacing=0)\n vbox_card.get_style_context().add_class(class_name='card')\n row.set_child(child=vbox_card)\n\n label = Gtk.Label.new(str=f'Row {n}')\n vbox_card.append(child=label)\n\n switch = Gtk.Switch.new()\n switch.set_margin_top(margin=6)\n switch.set_margin_end(margin=6)\n switch.set_margin_bottom(margin=6)\n switch.set_margin_start(margin=6)\n vbox_card.append(child=switch)\n\n listbox.append(child=row)\n\n # scrolledwindow.set_child(child=vbox_card)\n\n return 
vbox_listbox\n","repo_name":"johndizaro/ctrl","sub_path":"menu/cadastros_auxiliares/converte_unidade_medida/converte_unidade_medida_listbox.py","file_name":"converte_unidade_medida_listbox.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"14875783042","text":"#4. Given an array which may contain duplicates, print all elements and their frequencies.\n\nnum=[1,1,1,2,3,3,3,3,3,4,1,2,3,4,5]\n\nfreq={}\nfor item in num:\n if item in freq:\n freq[item]=freq[item]+1\n else:\n freq[item]=1\n \nprint(freq)\n","repo_name":"vijayabhaarathy/DSE_C0_Python","sub_path":"FreqofArrayElements.py","file_name":"FreqofArrayElements.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"38745777972","text":"from lark import Lark, Transformer, v_args\nfrom timeit import timeit\n\n## Lark\njson_grammar = r\"\"\"\n ?start: value\n ?value: object\n | array\n | string\n | SIGNED_NUMBER -> number\n | \"true\" -> true\n | \"false\" -> false\n | \"null\" -> null\n array : \"[\" [value (\",\" value)*] \"]\"\n object : \"{\" [pair (\",\" pair)*] \"}\"\n pair : string \":\" value\n string : ESCAPED_STRING\n %import common.ESCAPED_STRING\n %import common.SIGNED_NUMBER\n %import common.WS\n %ignore WS\n\"\"\"\n\n\nclass TreeToJson(Transformer):\n @v_args(inline=True)\n def string(self, s):\n return s[1:-1].replace('\\\\\"', '\"')\n\n array = list\n pair = tuple\n object = dict\n number = v_args(inline=True)(float)\n\n null = lambda self, _: None\n true = lambda self, _: True\n false = lambda self, _: False\n\n\n### Create the JSON parser with Lark, using the Earley algorithm\n# json_parser = Lark(json_grammar, parser='earley', lexer='standard')\n# def parse(x):\n# return TreeToJson().transform(json_parser.parse(x))\n\n### Create the JSON parser with Lark, using the LALR algorithm\njson_parser = Lark(\n json_grammar,\n parser=\"lalr\",\n # Using the standard lexer isn't required, and isn't usually recommended.\n # But, it's good enough for JSON, and it's slightly faster.\n lexer=\"standard\", # Disabling propagate_positions and placeholders slightly improves speed\n propagate_positions=False,\n maybe_placeholders=False,\n # Using an internal transformer is faster and more memory efficient\n transformer=TreeToJson(),\n)\nlark_parse = json_parser.parse\n\n## RBNF.hs\nfrom parser_wrap import parse\n\n## Python JSON\nfrom json import loads\n\ntext = \"\"\"\n{\n \"empty_object\" : {},\n \"empty_array\" : [],\n \"booleans\" : { \"YES\" : true, \"NO\" : false },\n \"numbers\" : [ 0, 1, -2, 3.3, 4.4e5, 6.6e-7 ],\n \"strings\" : [ \"This\", [ \"And\" , \"That\", \"And a \\\\\"b\" ] ],\n \"nothing\" : null\n}\n\"\"\"\n\n\nassert loads(text) == parse(text) == lark_parse(text)\nprint(timeit(\"parse(text)\", globals=dict(text=text, parse=loads), number=10000))\nprint(timeit(\"parse(text)\", globals=dict(text=text, parse=parse), number=10000))\nprint(timeit(\"parse(text)\", globals=dict(text=text, parse=lark_parse), number=10000))\n","repo_name":"thautwarm/frontend-for-free","sub_path":"runtest/rbnfjson/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"9"} +{"seq_id":"19128729621","text":"import http\n\nfrom thecompany_app import app\nfrom thecompany_app.models.department import Department\nfrom 
thecompany_app.tests.conftest import Conftest\n\n\nclass TestDepartmentView(Conftest):\n    \"\"\"\n    Department View page test cases\n    \"\"\"\n\n    def test_departments_page(self):\n        \"\"\"\n        Testing /department page\n        \"\"\"\n        client = app.test_client()\n        resp = client.get('/departments')\n        self.assertEqual(200, resp.status_code)\n        resp = client.post('/departments')\n        self.assertEqual(405, resp.status_code)\n\n        resp = client.get('/')\n        self.assertEqual(200, resp.status_code)\n\n        resp = client.post('/')\n        self.assertEqual(405, resp.status_code)\n\n        resp = client.post('/department', data={'department': 'WebDepartment'})\n        self.assertEqual(302, resp.status_code)\n        self.assertTrue(Department.check_if_exists('WebDepartment'))\n\n    def test_department_update_page(self):\n        \"\"\"\n        Testing /departments/update/<:uuid> page\n        \"\"\"\n        client = app.test_client()\n        uuid = Department.get_all()[0].uuid\n        resp = client.post('/departments/update/'+uuid, data={'department': 'ShitDepartment'})\n        assert resp.status_code == http.HTTPStatus.FOUND\n        self.assertTrue(Department.check_if_exists('ShitDepartment'))\n\n    def test_department_delete_page(self):\n        \"\"\"\n        Testing /departments/delete/<:uuid> page\n        \"\"\"\n        client = app.test_client()\n        uuid = Department.get_all()[0].uuid\n        resp = client.post('/departments/delete/' + uuid)\n        assert resp.status_code == http.HTTPStatus.FOUND\n        with self.assertRaises(ValueError):\n            Department.get_by_uuid(uuid)\n","repo_name":"dendeps/thecompany","sub_path":"thecompany_app/tests/test_department_view.py","file_name":"test_department_view.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"7306653061","text":"from selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys\nfrom locators.lecture_page_locators import LecturePageLocators\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\nclass LecturePage:\n    def __init__(self, driver, wait):\n        self.driver = driver\n        self.wait = wait\n        self.driver.get(\"https://lectoriy.mipt.ru/lecture\")\n        self.title = self.driver.title\n        self.locators = LecturePageLocators()\n\n    def get_lecturer_names(self):\n        lecturer_names = set()\n        while True:\n            lecturers = self.wait.until(EC.presence_of_all_elements_located(self.locators.LECTURER))\n            for lecturer in lecturers:\n                for name in lecturer.text.split(','):\n                    lecturer_names.add(name.strip())\n            try:\n                next_page_link = self.wait.until(EC.presence_of_element_located(self.locators.NEXT)).find_element_by_tag_name('a').get_attribute(\"href\")\n            except NoSuchElementException:\n                break\n            self.driver.get(next_page_link)\n        return lecturer_names\n","repo_name":"nikitayusupov/selenium-tests-mipt-hw","sub_path":"pages/lecture_page.py","file_name":"lecture_page.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"23076597828","text":"# To run and test the code you need to update 4 places:\n# 1. Change MY_EMAIL/MY_PASSWORD to your own details.\n# 2. Change MY_LAT/MY_LONG to your current location. latlong.net\n# 3. Go to your email provider and make it allow less secure apps.\n# 4. 
Update the SMTP ADDRESS to match your email provider.\n\nimport requests\nfrom datetime import datetime\nfrom email.message import EmailMessage\nimport ssl\nimport smtplib\nimport time\n\nMY_EMAIL = \"YOUR EMAIL\"\nMY_PASSWORD = \"YOUR PASSWORD\"\nMY_LAT = 26.245609 # Your latitude\nMY_LONG = 68.406731 # Your longitude\n\n\ndef is_overhead():\n response = requests.get(url=\"http://api.open-notify.org/iss-now.json\")\n response.raise_for_status()\n data = response.json()\n\n iss_latitude = float(data[\"iss_position\"][\"latitude\"])\n iss_longitude = float(data[\"iss_position\"][\"longitude\"])\n if MY_LAT-5 <= iss_latitude <= MY_LAT+5 and MY_LONG-5 <= iss_longitude <= MY_LONG+5:\n return True\n\n\ndef is_night():\n parameters = {\n \"lat\": MY_LAT,\n \"lng\": MY_LONG,\n \"formatted\": 0,\n }\n response = requests.get(\"https://api.sunrise-sunset.org/json\", params=parameters)\n response.raise_for_status()\n data = response.json()\n sunrise = int(data[\"results\"][\"sunrise\"].split(\"T\")[1].split(\":\")[0])\n sunset = int(data[\"results\"][\"sunset\"].split(\"T\")[1].split(\":\")[0])\n\n time_now = datetime.now().hour\n\n if time_now >= sunset or time_now <= sunrise:\n return True # IT's Dark\n\nwhile True:\n # If the ISS is close to my current position\n # and it is currently dark\n # Then send me an email to tell me to look up.\n if is_overhead() and is_night():\n msg = EmailMessage()\n msg['From'] = MY_EMAIL\n msg['To'] = MY_EMAIL\n msg['Subject'] = \"Look Up👆\"\n body = \"\"\"\n Hey!\n The ISS is above you in the sky.\n \"\"\"\n msg.set_content(body)\n context = ssl.create_default_context()\n with smtplib.SMTP_SSL(\"YOUR EMAIL PROVIDER SMTP SERVER ADDRESS\", context=context) as connection:\n connection.login(user=MY_EMAIL, password=MY_PASSWORD)\n connection.sendmail(\n from_addr=MY_EMAIL,\n to_addrs=\"malikfarhan57@outlook.com\",\n msg=msg.as_string()\n )\n time.sleep(60)\n","repo_name":"Farhan0016/Day-33_ISS-Overhead-Notifier","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"3672780398","text":"import logging\nimport sys\n\nsys.path.append('src')\n\nimport uvicorn\nfrom fastapi import FastAPI\n\nfrom api.v1.auth import api_auth_router\nfrom api.v1.routes import router\nfrom core.config import settings\n\nlogger = logging.getLogger(__name__)\n\napp = FastAPI(\n title=settings.APP_TITLE,\n openapi_tags=[\n {\n \"name\": \"v1\",\n \"description\": \"API v1\",\n },\n {\n \"name\": \"auth\",\n \"description\": \"Authentication\",\n },\n ],\n)\n\napp.include_router(router, prefix='/v1', tags=['v1'], )\napp.include_router(api_auth_router, prefix='/v1', tags=['auth'], )\n\nif __name__ == '__main__':\n uvicorn.run('main:app', host=settings.PROJECT_HOST, port=settings.PROJECT_PORT, reload=True)\n","repo_name":"ustnv/async-python-sprint-5","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"4109410433","text":"from web3 import Web3\nfrom solcx import compile_standard, install_solc\nimport json\nfrom dotenv import load_dotenv, dotenv_values\n\nload_dotenv()\nconfig = dotenv_values(\".env\")\n\nwith open(\"./ZonesStorage.sol\", \"r\") as file:\n simple_storage_file = file.read()\n\n\nprint(\"Installing ...\")\ninstall_solc(\"0.8.0\")\n\ncompile_sol = compile_standard({\n \"language\": \"Solidity\",\n 
\"sources\": {\n \"ZonesStorage.sol\": {\n \"content\": simple_storage_file\n }\n },\n \"settings\": {\n \"outputSelection\": {\n \"*\": {\n \"*\": [\"abi\", \"metadata\", \"evm.bytecode\", \"evm.sourceMap\"]\n }\n }\n }\n},\n solc_version=\"0.8.0\"\n)\n\nwith open(\"compiled_code.json\", \"w\") as file:\n json.dump(compile_sol, file)\n\n\n# get bytecode\nbytecode = compile_sol[\"contracts\"][\"ZonesStorage.sol\"][\"ZonesStorage\"][\"evm\"][\"bytecode\"][\"object\"]\n\n# get abi\nabi = compile_sol[\"contracts\"][\"ZonesStorage.sol\"][\"ZonesStorage\"][\"abi\"]\n\n\n# connecting to blockchain\nw3 = Web3(Web3.HTTPProvider(\"HTTP://127.0.0.1:7545\"))\nchain_id = 1337\naddr = config['ADDR']\npvt_key = config['PRIVATE_KEY']\n\n\n# Create a contract\nZonesStorage = w3.eth.contract(abi=abi, bytecode=bytecode)\nnonce = w3.eth.getTransactionCount(addr)\n\n\n# Build Transaction\ntransaction = ZonesStorage.constructor().buildTransaction(\n {\"chainId\": chain_id, \"from\": addr, \"nonce\": nonce, \"gasPrice\": w3.eth.gas_price})\n\nsigned_txn = w3.eth.account.sign_transaction(transaction, private_key=pvt_key)\n\n# send signed Transaction\ntxn_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)\n\nprint(\"Waiting for transaction to finish...\")\ntxn_receipt = w3.eth.wait_for_transaction_receipt(txn_hash)\ncontract_addr = txn_receipt.contractAddress\nprint(f\"Done! Contract deployed to {contract_addr}\")\n","repo_name":"ankitsawho/blockchain-based-dns","sub_path":"deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"2465190980","text":"import argparse\nimport json\nimport os\nimport sys\nimport core\nimport message\nfrom Registry import module_dict\nfrom sendNotify import qq\n\n\ndef build_parser_from_cfg(task_cfg):\n parser_cfg = task_cfg['parser']\n obj_type = parser_cfg.pop('type')\n obj_cls = module_dict[obj_type]\n return obj_cls(**parser_cfg)\n\n\ndef build_message_from_cfg(task_cfg):\n parser_cfg = task_cfg['message']\n obj_type = parser_cfg.pop('type')\n obj_cls = module_dict[obj_type]\n return obj_cls(**parser_cfg)\n\n\ndef merge_cfg_by_default(task_cfg):\n with open(home(\"base_tasks.json\"), 'r', encoding='utf-8') as fr:\n base_tasks = json.loads(fr.read())\n for k, v in base_tasks.items():\n if k not in task_cfg:\n task_cfg[k] = v\n return task_cfg\n\n\ndef job(_task):\n try:\n print(\"running \", _task['title'])\n _task = merge_cfg_by_default(_task)\n old, new = build_parser_from_cfg(_task).parse(_task['title'])\n if new:\n qq(msg_to=_task['QQ'], msg=build_message_from_cfg(_task).build_message([i for i in new]), title=_task['title'])\n except BaseException as e:\n print(e)\n\n\ndef home(path):\n return os.path.join(sys.path[0], path)\n\n\n\"\"\"crontab\n0 6,18 * * *\n\"\"\"\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--debug', action='store_true')\n parser.add_argument('--show_cfg', action='store_true', help='打印支持的解析器与消息发送器')\n parser.add_argument('--show_tasks', action='store_true', help='打印所有任务')\n parser.add_argument('--show_tasks_', action='store_true', help='打印所有被禁用的任务')\n parser.add_argument('--show_records', action='store_true', help='打印所有任务记录')\n parser.add_argument('--with_readable', action='store_true', help='格式化json')\n args = parser.parse_args()\n if args.show_cfg:\n with open(home('doc.json'), mode='r', encoding='utf-8') as f:\n doc = json.loads(f.read())\n _p = []\n _m = []\n for k, v in 
module_dict.items():\n r = {}\n if k in doc:\n r[k] = doc[k]\n if str(k).endswith(\"Parser\"):\n _p.append(r)\n else:\n _m.append(r)\n print(json.dumps(dict(parser=_p, messager=_m), ensure_ascii=False, indent=4 if args.with_readable else None))\n sys.exit(0)\n with open(home(\"change_detection_tasks.json\"), 'r', encoding='utf-8') as f:\n tasks = json.loads(f.read())\n if args.show_tasks:\n print(json.dumps(tasks, ensure_ascii=False, indent=4 if args.with_readable else None))\n sys.exit(0)\n if args.show_tasks_:\n with open(home(\"_change_detection_tasks.json\"), 'r', encoding='utf-8') as f:\n print(json.dumps(json.loads(f.read()), ensure_ascii=False, indent=4 if args.with_readable else None))\n sys.exit(0)\n if args.show_records:\n if os.path.exists(home('task_data_store.json')):\n with open(home('task_data_store.json'), 'r', encoding='utf-8') as f:\n task_data_store = json.loads(f.read())\n print(json.dumps(task_data_store, ensure_ascii=False, indent=4 if args.with_readable else None))\n else:\n print(\"{}\")\n sys.exit(0)\n for task in tasks:\n job(task)\n","repo_name":"PPeanutButter/ChangeNotification","sub_path":"changedetection.py","file_name":"changedetection.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"33415354588","text":"import openalea.plantscan3d.mtgmanip as mm\nfrom openalea.mtg.io import write_mtg\nfrom openalea.plantgl.all import *\nfrom openalea.plantscan3d.serial import max_order\nfrom openalea.plantscan3d.xumethod import xu_method\n\nfrom utilities.configuration_file import *\n\n\ndef skeleton(points, binratio=50, k=20):\n \"\"\"\n The skeleton function creates a skeleton(using xu_method) from a pear tree point_cloud.\n This skeleton is stored in a .mtg file.\n This mtg file could be exported or worked with internally.\n\n :param points: scene[0].geometry.pointList\n :param binratio: binratio=50\n :param k: k=20\n :return: mtg file\n \"\"\"\n mini, maxi = points.getZMinAndMaxIndex()\n root = Vector3(points[mini])\n\n mtg = mm.initialize_mtg(root)\n zdist = points[maxi].z - points[mini].z\n binlength = zdist / binratio\n\n vtx = list(mtg.vertices(mtg.max_scale()))\n startfrom = vtx[0]\n mtg = xu_method(mtg, startfrom, points, binlength, k)\n\n return mtg\n\n\ndef create_scene_and_skeletonize(input_point_cloud_name):\n \"\"\"\n This function creates a scene > points and then converts it to a mtg file.\n :param input_point_cloud_name: name of the point cloud stored in the input point clouds dir\n :return: mtg\n \"\"\"\n scene = Scene(INPUT_POINT_CLOUDS_DIR + input_point_cloud_name)\n points = scene[0].geometry.pointList\n mtg = skeleton(points)\n return mtg\n\n\ndef writeMTGfile(fn, g, properties=[('XX', 'REAL'), ('YY', 'REAL'), ('ZZ', 'REAL'), ('radius', 'REAL')]):\n if properties == []:\n properties = [(p, 'REAL') for p in g.property_names() if p not in ['edge_type', 'index', 'label']]\n nb_tab = max_order(g)\n str = write_mtg(g, properties, nb_tab=nb_tab)\n f = open(fn, 'w+')\n f.write(str)\n f.close()\n\n\ndef main():\n \"\"\"\n The Skeletonization code creates a skeleton from a input point cloud.\n \"\"\"\n input_point_cloud_name = \"PCD60.ply\"\n\n mtg = create_scene_and_skeletonize(input_point_cloud_name)\n writeMTGfile(\"hoi.mtg\", mtg)\n\n # mtg_lines = write_mtg(mtg, properties=[('XX','REAL'), ('YY','REAL'), ('ZZ','REAL'), ('radius','REAL')])\n #\n # # Write the result into a file example.mtg\n # filename = 'example.mtg'\n # f = open(filename, 'w+')\n # 
f.write(mtg_lines)\n # f.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SmartFarmingPeren/SFP-DataBewerking","sub_path":"experimental/previous_group_25-05-2021/skeletonization/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"74100509092","text":"from gceapi.api import common as gce_common\nfrom gceapi.api import machine_type_api\nfrom gceapi.api import wsgi as gce_wsgi\n\n\nclass Controller(gce_common.Controller):\n \"\"\"GCE Machine types controller\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Controller, self).__init__(machine_type_api.API(),\n *args, **kwargs)\n\n def format_item(self, request, flavor, scope):\n result_dict = {\n \"name\": flavor[\"name\"],\n \"description\": \"\",\n \"guestCpus\": flavor[\"vcpus\"],\n \"memoryMb\": flavor[\"ram\"],\n \"imageSpaceGb\": flavor[\"disk\"],\n # NOTE(Alex): Is not supported by Openstack\n \"maximumPersistentDisks\": 0,\n # NOTE(Alex): Is not supported by Openstack\n \"maximumPersistentDisksSizeGb\": 0,\n }\n\n if \"OS-FLV-EXT-DATA:ephemeral\" in flavor:\n size = flavor[\"OS-FLV-EXT-DATA:ephemeral\"]\n if size > 0:\n result_dict[\"scratchDisks\"] = [{\"diskGb\": size}]\n\n return self._format_item(request, result_dict, scope)\n\n\ndef create_resource():\n return gce_wsgi.GCEResource(Controller())\n","repo_name":"yencarnacion/gce","sub_path":"gceapi/api/machine_types.py","file_name":"machine_types.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"2578881301","text":"#controllers/dashboard/user.py\nimport config, copy, lib, datetime\nfrom flask import render_template, session, request, redirect\nfrom models.userdb import Userdb\n\nclass User():\n def __init__(self):\n self.userdb = Userdb()\n self.lib = lib.Lib()\n\n def signup(self):\n vdict = copy.deepcopy(config.vdict)\n vdict['site_title'] = \"ទំព័រ​អ្នក​ប្រើប្រាស់\"\n vdict['datetime'] = self.lib.get_timezone()\n session['page'] = 0\n\n if request.method == \"POST\":\n username = request.form['fusername']\n content = request.form['fcontent']\n password = request.form['fpassword']\n role = request.form['frole']\n date = request.form['fdate']\n time = request.form['ftime']\n email = request.form['femail']\n edit_id = request.form['fedit-id']\n\n if not email:\n vdict['message'] = 'ចាំបាច់​ត្រូវ​មាន​ E-MAIL!'\n return render_template('dashboard/user.html', data=vdict)\n\n if (self.userdb.check_email(email)) and (not edit_id):\n vdict['message'] = 'E-MAIL នេះ​ត្រូវ​បាន​គេ​យក​ទៅ​ប្រើប្រាស់​ហើយ។'\n return render_template('dashboard/user.html', data=vdict)\n\n if 'logged-in' in session:\n author_id = session['author-id']\n author_role = self.userdb.check_author(author_id)\n author = author_role[1]\n else:\n author = 'root'\n\n try:\n date = datetime.datetime.strptime(date, \"%d/%m/%Y\")\n except ValueError:\n vdict['message'] = 'ទំរង់​កាលបរិច្ឆេទ​មិន​ត្រឹមត្រូវ!'\n return render_template('dashboard/user.html', data=vdict)\n\n try:\n time = datetime.datetime.strptime(time, \"%H:%M:%S\")\n except ValueError:\n vdict['message'] = 'ទំរង់​ពេល​វេលា​មិន​ត្រឹមត្រូវ!'\n return render_template('dashboard/user.html', data=vdict)\n\n if edit_id:\n if author_role[4] == 'Admin':\n self.userdb.update(username, email, password, role, content, date, time, author, edit_id)\n else:\n if author_role[4] == 'Admin':\n self.userdb.insert(username, email, 
password, role, content, date, time, author)\n\n            vdict['users'] = self.userdb.select(vdict['dashboard_max_post'])\n            vdict['thumbs'] = self.lib.get_thumbs(vdict['users'], 5, type='user')\n            return render_template('dashboard/user.html', data=vdict)\n\n        elif 'logged-in' in session:\n            vdict['users'] = self.userdb.select(vdict['dashboard_max_post'])\n            vdict['thumbs'] = self.lib.get_thumbs(vdict['users'], 5, type='user')\n            return render_template('dashboard/user.html', data=vdict)\n        else:\n            return redirect('/login/')\n\n    def edit(self, id):\n        vdict = copy.deepcopy(config.vdict)\n        vdict['blog_title'] = 'កែតំរូវ​អ្នក​ប្រើប្រាស់'\n        vdict['edit-id'] = id\n\n        if 'logged-in' in session:\n            vdict['users'] = self.userdb.select(vdict['dashboard_max_post'])\n            vdict['thumbs'] = self.lib.get_thumbs(vdict['users'], 5, type='user')\n            vdict['user'] = self.userdb.select(id=id)\n            date = (vdict['user'][6]).strftime('%d/%m/%Y')\n            time = (vdict['user'][7]).strftime('%H:%M:%S')\n            vdict['datetime'] = (date, time)\n\n            return render_template('dashboard/user.html', data=vdict)\n\n        return redirect('/login/')\n\n    def delete(self, id):\n        author_id = session['author-id']\n        author_role = self.userdb.check_author(author_id)\n        if author_role[4] == 'Admin':\n            self.userdb.delete(id)\n\n        return redirect('/dashboard/user/signup/')\n\n    def load(self):\n        if 'logged-in' in session:\n            vdict = copy.deepcopy(config.vdict)\n            session['page'] += 1\n            vdict['users'] = self.userdb.select(vdict['dashboard_max_post'], page=session['page'])\n            vdict['thumbs'] = self.lib.get_thumbs(vdict['users'], 5, type=\"user\")\n\n            new_list = []\n            for user in vdict['users']:\n                new_user = list(user)\n                new_user[6] = user[6].strftime('%d/%m/%Y') \n                new_user[7] = user[7].strftime('%H:%M:%S') \n                new_list.append(new_user)\n\n            vdict['users'] = new_list\n            return vdict\n        else:\n            vdict = copy.deepcopy(config.vdict)\n            return render_template('login.html', data=vdict)","repo_name":"Sokhavuth/ETV","sub_path":"controllers/dashboard/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"4666689667","text":"import pymongo\nimport pandas as pd\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/neurolabDB\")\nimport json\nDATABASE_NAME = \"thyroid\"\nCOLECTION_NAME = \"data\"\n\nif __name__ ==\"__main__\":\n    df = pd.read_csv(\"/config/workspace/dataset/allrep.data\",header = None)\n    column_list = ['age',\n 'sex',\n 'on_thyroxine',\n 'query_on_thyroxine',\n 'on_antithyroid_medication',\n 'sick',\n 'pregnant',\n 'thyroid_surgery',\n 'I131_treatment',\n 'query_hypothyroid',\n 'query_hyperthyroid',\n 'lithium',\n 'goitre',\n 'tumor',\n 'hypopituitary',\n 'psych',\n 'TSH_measured',\n 'TSH',\n 'T3_measured',\n 'T3',\n 'TT4_measured',\n 'TT4',\n 'T4U_measured',\n 'T4U',\n 'FTI_measured',\n 'FTI',\n 'TBG_measured',\n 'TBG',\n 'referral_source',\n 'Class']\n    \n    df.columns = column_list\n    print(f\"Rows and columns: {df.shape}\")\n\n\n    #resetting the index\n    df.reset_index(drop=True, inplace = True)\n\n    #converting dataframe to json\n    json_record = list(json.loads(df.T.to_json()).values())\n    print(json_record[:2])\n\n    #insert converted json record to mongo db\n    client[DATABASE_NAME][COLECTION_NAME].insert_many(json_record)\n\n","repo_name":"rajandevkota98/ThyroidDiseaseDetection","sub_path":"data_dump.py","file_name":"data_dump.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"}
+{"seq_id":"39357204124","text":"print('Exercice 5')\n\nimport numpy as np\nfrom numpy import array, dot, random\nimport matplotlib.pyplot as plt\n\n# Learning step\np = 30\nR = 1.5\nt = np.linspace(0, 2*np.pi, p)\n\n# First class +1\nx1= [ 1+R*random.rand()*np.cos(t[n]) for n in range(p) ]\ny1= [ 1+R*random.rand()*np.sin(t[n]) for n in range(p) ]\n\n# Second class 0\nx2= [ -1+R*random.rand()*np.cos(t[n]) for n in range(p) ]\ny2= [ -1+R*random.rand()*np.sin(t[n]) for n in range(p) ]\n\nplt.scatter(x1,y1,c='red', marker = 'o', s=4)\nplt.scatter(x2,y2,c='green', marker = 'o', s=4)\nplt.title('Perceptron algorithm')\n#plt.savefig('datapoints.png')\nplt.show()\n\n# Set up the data set\ntraining_data=[[x1[i],y1[i]] for i in range(p)]+[[x2[i],y2[i]] for i in range(p)]\n\n\n# initialization\ncdt=0\nxb,yb=training_data[1]\nxa,ya=training_data[0]\n\n# loop until convergence\nwhile cdt==0:\n    distancea=[np.sqrt((xa-training_data[i][0])**2 + (ya-training_data[i][1])**2) for i in range(len(training_data))]\n    distanceb=[np.sqrt((xb-training_data[i][0])**2 + (yb-training_data[i][1])**2) for i in range(len(training_data))]\n    GroupeAX,GroupeAY=[],[]\n    GroupeBX,GroupeBY=[],[]\n    for i in range(len(training_data)):\n        if distancea[i] == min(distancea[i],distanceb[i]):\n            GroupeAX += [training_data[i][0]]\n            GroupeAY += [training_data[i][1]]\n        else:\n            GroupeBX += [training_data[i][0]]\n            GroupeBY += [training_data[i][1]]\n    plt.scatter(GroupeAX,GroupeAY,c='red', marker = 'o', s=4)\n    plt.scatter(GroupeBX,GroupeBY,c='green', marker = 'o', s=4)\n    plt.scatter([xa,xb],[ya,yb], c='black', marker='x', s=8)\n    plt.show()\n\n    xan, yan=[np.sum(GroupeAX)/len(GroupeAX), np.sum(GroupeAY)/len(GroupeAY)]\n    xbn, ybn = [np.sum(GroupeBX) / len(GroupeBX), np.sum(GroupeBY) / len(GroupeBY)]\n    if [xb, yb] == [xbn, ybn] and [xa, ya] == [xan, yan]:\n        cdt = 1\n    else:\n        [xb, yb] = [xbn, ybn]\n        [xa, ya] = [xan, yan]","repo_name":"Nigma141/CoursIA","sub_path":"ExerciceReseauNeuronne/Exercice5.py","file_name":"Exercice5.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"}
+{"seq_id":"22165462469","text":"from ..core import EPCDispatcher\nfrom .utils import BaseTestCase\n\n\nclass Dummy(object):\n    pass\n\n\nclass TestEPCDispatcher(BaseTestCase):\n\n    def setUp(self):\n        self.dispatcher = EPCDispatcher()\n\n    def test_register_module(self):\n        import os\n        self.dispatcher.register_instance(os)\n        self.assertIs(self.dispatcher.get_method('chmod'), os.chmod)\n\n    def test_register_module_with_dotted_names(self):\n        import os\n        self.dispatcher.register_instance(os, allow_dotted_names=True)\n        self.assertIs(self.dispatcher.get_method('path.join'), os.path.join)\n\n    def test_error_on_private_method_access(self):\n        obj = Dummy()\n        obj._private_method = lambda: None\n        obj.sub = Dummy()\n        obj.sub._private_attribute = Dummy()\n        obj.sub._private_attribute.some_method = lambda: None\n        self.dispatcher.register_instance(obj, allow_dotted_names=True)\n        self.assertRaises(AttributeError, self.dispatcher.get_method,\n                          '_private_method')\n        self.assertRaises(AttributeError, self.dispatcher.get_method,\n                          'obj.sub._private_attribute.some_method')\n\n    def test_instance_get_method(self):\n        always_me = lambda: None\n        obj = Dummy()\n        obj._get_method = lambda _: always_me\n        self.dispatcher.register_instance(obj)\n        self.assertIs(self.dispatcher.get_method('x'), always_me)\n        self.assertIs(self.dispatcher.get_method('y'), 
always_me)\n","repo_name":"tkf/python-epc","sub_path":"epc/tests/test_dispatcher.py","file_name":"test_dispatcher.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"9"} +{"seq_id":"18570134899","text":"import wbdata\nimport datetime\nimport pandas as pd\n\nwbcols = {'PA.NUS.FCRF':'NER',\n 'FP.CPI.TOTL':'CPI',\n 'NY.GDP.MKTP.CD':'NGDP_USD',\n 'NY.GDP.MKTP.CN':'NGDP_LCU',\n 'NY.GDP.MKTP.KD':'RGDP_USD',\n 'NY.GDP.MKTP.KN':'RGDP_LCU',\n 'NE.IMP.GNFS.CD':'Imports_USD',\n 'NE.IMP.GNFS.CN':'Imports_LCU'}\n\ndata_date = datetime.datetime(1990, 1, 1), datetime.datetime(2010, 1, 1)\ncontrols = wbdata.get_dataframe(indicators=wbcols,country=\"all\",data_date=data_date,convert_date=False,keep_levels=False).reset_index()\n\nclist = wbdata.get_country(\"all\",display=False)\nids = [c['id'] for c in clist]\nnames = [c['name'] for c in clist]\ncountries = pd.DataFrame({'d':ids,'country':names})\ncontrols = pd.merge(left=controls,right=countries,how='left',on='country')\n\ncontrols.rename(columns={'date':'y'},inplace=True)\ncontrols['y'] = controls['y'].astype(int)\ncontrols.to_pickle('output/pik/wdi_data.pik')\n\n","repo_name":"joesteinberg/dyn-mkt-pen","sub_path":"programs/python/get_wbdata.py","file_name":"get_wbdata.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"39267850319","text":"import random\n\nNUCLEOBASES = \"ATGC\"\nDNA_SIZE = 100\n\nsequence = \"\".join([random.choice(NUCLEOBASES) for i in range(DNA_SIZE)])\n\nsu_a = 0\nsu_t = 0\nsu_g = 0\nsu_c = 0\n\nfor char in sequence:\n if char == \"A\":\n su_a = su_a + 1\n elif char == \"T\":\n su_t = su_t + 1\n elif char == \"G\":\n su_g = su_g + 1\n elif char == \"C\":\n su_c = su_c + 1\nprint(f\"Adenine:{su_a}\\nThymine:{su_t}\\nGuanine:{su_g}\\nCytosine:{su_c}\")\n","repo_name":"MeloGD/ASIR","sub_path":"imw/ut1/a8/a8p2.py","file_name":"a8p2.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"4751099176","text":"from typing import List\n\nimport schedule\n\nfrom src.audio import Audio\nfrom src.db.models.remindme import RemindMe\n\n\nclass Scheduler:\n scheduled_reminders = {}\n data_instance = None\n\n def __init__(self) -> None:\n self.audio = Audio()\n\n def schedule_all(self):\n reminders: List[RemindMe] = RemindMe.get_reminder_by_active_cols(True)\n for reminder in reminders:\n self.add_reminder(reminder)\n\n def add_reminder(self, reminder: RemindMe):\n for day in reminder.days:\n if day == \"Mon\":\n alert = (\n schedule.every()\n .monday.at(reminder.alert_time.isoformat())\n .do(self.run_reminder, reminder_id=reminder.id)\n )\n elif day == \"Tue\":\n alert = (\n schedule.every()\n .tuesday.at(reminder.alert_time.isoformat())\n .do(self.run_reminder, reminder_id=reminder.id)\n )\n elif day == \"Wed\":\n alert = (\n schedule.every()\n .wednesday.at(reminder.alert_time.isoformat())\n .do(self.run_reminder, reminder_id=reminder.id)\n )\n elif day == \"Thu\":\n alert = (\n schedule.every()\n .thursday.at(reminder.alert_time.isoformat())\n .do(self.run_reminder, reminder_id=reminder.id)\n )\n elif day == \"Fri\":\n alert = (\n schedule.every()\n .friday.at(reminder.alert_time.isoformat())\n .do(self.run_reminder, reminder_id=reminder.id)\n )\n elif day == \"Sat\":\n alert = (\n schedule.every()\n .saturday.at(reminder.alert_time.isoformat())\n 
.do(self.run_reminder, reminder_id=reminder.id)\n                )\n            elif day == \"Sun\":\n                alert = (\n                    schedule.every()\n                    .sunday.at(reminder.alert_time.isoformat())\n                    .do(self.run_reminder, reminder_id=reminder.id)\n                )\n            Scheduler.scheduled_reminders[\n                reminder.id\n            ] = Scheduler.scheduled_reminders.get(reminder.id, {}) | {day: alert}\n\n    def set_reminder_to_passed(self, reminder_id):\n        \"\"\"change the reminder state that just alarmed to `passed`\n\n        The only way I could get this to work was to\n        - Get the reminder and change the state\n        - delete that reminder from the total data of reminders\n        - Then append them again.\n\n        Args:\n            reminder_id (int): ID of reminder to change the state.\n        \"\"\"\n        data = Scheduler.data_instance.data\n        for item in data:\n            if item[\"id\"] == reminder_id:\n                idx = data.index(item)\n                data[idx][\"state\"] = \"[color=#f74728]Passed[/color]\"\n                modified_reminder = data[idx]\n                data.remove(modified_reminder)\n                Scheduler.data_instance.data = data + [modified_reminder]\n                break\n\n    def run_reminder(self, reminder_id: int):\n        # Override the reminder's state\n        self.set_reminder_to_passed(reminder_id)\n        # Play the audio\n        self.audio.play()\n","repo_name":"Eyongkevin/remindme-app","sub_path":"src/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
+{"seq_id":"34772370818","text":"from os import system, name\n\ndef clear():\n    system('cls' if name == 'nt' else 'clear')\n\ndef main():\n    def menu():\n        print(\"Menú\")\n        print(\"1. Suma\")\n        print(\"2. Resta\")\n        print(\"3. Salir\")\n        opcion = input(\"Elige una opción: \")\n        clear()\n\n        if opcion == \"3\":\n            return\n        elif opcion == \"1\":\n            return suma()\n        elif opcion == \"2\":\n            return resta()\n        else:\n            return menu()\n\n    def submenu(action):\n        print(\"Submenú\")\n        print(\"1. Regresar al menú principal\")\n        print(\"2. Repetir operación\")\n        print(\"3. Salir\")\n        opcion = input(\"Elige una opción: \")\n        clear()\n\n        if opcion == \"1\":\n            return menu()\n        elif opcion == \"2\":\n            return action()\n        elif opcion == \"3\":\n            return\n\n    def suma():\n        print(\"Suma\")\n        a = int(input(\"Ingresa un número (a): \"))\n        b = int(input(\"Ingresa un número (b): \"))\n        print(f\"Resultado: {a + b}\")\n        return submenu(suma)\n\n    def resta():\n        print(\"Resta\")\n        a = int(input(\"Ingresa un número (a): \"))\n        b = int(input(\"Ingresa un número (b): \"))\n        print(f\"Resultado: {a - b}\")\n        return submenu(resta)\n\n    return menu()\n\n","repo_name":"LaloFl/LI_proyecto_final","sub_path":"u1t12_menu.py","file_name":"u1t12_menu.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
# noqa: E501\n\n OpenAPI spec version: 1.0.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass RecreationArea(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n \"rec_area_id\": \"str\",\n \"org_rec_area_id\": \"str\",\n \"parent_org_id\": \"str\",\n \"rec_area_name\": \"str\",\n \"rec_area_description\": \"str\",\n \"rec_area_fee_description\": \"str\",\n \"rec_area_directions\": \"str\",\n \"rec_area_phone\": \"str\",\n \"rec_area_email\": \"str\",\n \"rec_area_reservation_url\": \"str\",\n \"rec_area_map_url\": \"str\",\n \"geojson\": \"RecreationAreaGEOJSON\",\n \"rec_area_longitude\": \"float\",\n \"rec_area_latitude\": \"float\",\n \"stay_limit\": \"str\",\n \"keywords\": \"str\",\n \"reservable\": \"bool\",\n \"enabled\": \"bool\",\n \"last_updated_date\": \"date\",\n \"organization\": \"list[Organization]\",\n \"facility\": \"list[RecreationAreaFacility]\",\n \"recareaaddress\": \"list[RecreationAreaAddress]\",\n \"activity\": \"list[RecreationAreaActivity]\",\n \"event\": \"list[Event]\",\n \"media\": \"list[Media]\",\n \"link\": \"list[Link]\",\n }\n\n attribute_map = {\n \"rec_area_id\": \"RecAreaID\",\n \"org_rec_area_id\": \"OrgRecAreaID\",\n \"parent_org_id\": \"ParentOrgID\",\n \"rec_area_name\": \"RecAreaName\",\n \"rec_area_description\": \"RecAreaDescription\",\n \"rec_area_fee_description\": \"RecAreaFeeDescription\",\n \"rec_area_directions\": \"RecAreaDirections\",\n \"rec_area_phone\": \"RecAreaPhone\",\n \"rec_area_email\": \"RecAreaEmail\",\n \"rec_area_reservation_url\": \"RecAreaReservationURL\",\n \"rec_area_map_url\": \"RecAreaMapURL\",\n \"geojson\": \"GEOJSON\",\n \"rec_area_longitude\": \"RecAreaLongitude\",\n \"rec_area_latitude\": \"RecAreaLatitude\",\n \"stay_limit\": \"StayLimit\",\n \"keywords\": \"Keywords\",\n \"reservable\": \"Reservable\",\n \"enabled\": \"Enabled\",\n \"last_updated_date\": \"LastUpdatedDate\",\n \"organization\": \"ORGANIZATION\",\n \"facility\": \"FACILITY\",\n \"recareaaddress\": \"RECAREAADDRESS\",\n \"activity\": \"ACTIVITY\",\n \"event\": \"EVENT\",\n \"media\": \"MEDIA\",\n \"link\": \"LINK\",\n }\n\n def __init__(\n self,\n rec_area_id=None,\n org_rec_area_id=None,\n parent_org_id=None,\n rec_area_name=None,\n rec_area_description=None,\n rec_area_fee_description=None,\n rec_area_directions=None,\n rec_area_phone=None,\n rec_area_email=None,\n rec_area_reservation_url=None,\n rec_area_map_url=None,\n geojson=None,\n rec_area_longitude=None,\n rec_area_latitude=None,\n stay_limit=None,\n keywords=None,\n reservable=None,\n enabled=None,\n last_updated_date=None,\n organization=None,\n facility=None,\n recareaaddress=None,\n activity=None,\n event=None,\n media=None,\n link=None,\n ): # noqa: E501\n \"\"\"RecreationArea - a model defined in Swagger\"\"\" # noqa: E501\n self._rec_area_id = None\n self._org_rec_area_id = None\n self._parent_org_id = None\n self._rec_area_name = None\n self._rec_area_description = None\n self._rec_area_fee_description = None\n self._rec_area_directions = None\n self._rec_area_phone = None\n self._rec_area_email = None\n self._rec_area_reservation_url = None\n self._rec_area_map_url = None\n 
self._geojson = None\n self._rec_area_longitude = None\n self._rec_area_latitude = None\n self._stay_limit = None\n self._keywords = None\n self._reservable = None\n self._enabled = None\n self._last_updated_date = None\n self._organization = None\n self._facility = None\n self._recareaaddress = None\n self._activity = None\n self._event = None\n self._media = None\n self._link = None\n self.discriminator = None\n self.rec_area_id = rec_area_id\n self.org_rec_area_id = org_rec_area_id\n if parent_org_id is not None:\n self.parent_org_id = parent_org_id\n self.rec_area_name = rec_area_name\n self.rec_area_description = rec_area_description\n self.rec_area_fee_description = rec_area_fee_description\n self.rec_area_directions = rec_area_directions\n self.rec_area_phone = rec_area_phone\n self.rec_area_email = rec_area_email\n self.rec_area_reservation_url = rec_area_reservation_url\n self.rec_area_map_url = rec_area_map_url\n self.geojson = geojson\n self.rec_area_longitude = rec_area_longitude\n self.rec_area_latitude = rec_area_latitude\n self.stay_limit = stay_limit\n self.keywords = keywords\n self.reservable = reservable\n self.enabled = enabled\n self.last_updated_date = last_updated_date\n if organization is not None:\n self.organization = organization\n if facility is not None:\n self.facility = facility\n if recareaaddress is not None:\n self.recareaaddress = recareaaddress\n if activity is not None:\n self.activity = activity\n if event is not None:\n self.event = event\n if media is not None:\n self.media = media\n if link is not None:\n self.link = link\n\n @property\n def rec_area_id(self):\n \"\"\"Gets the rec_area_id of this RecreationArea. # noqa: E501\n\n RIDB unique RecArea ID # noqa: E501\n\n :return: The rec_area_id of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_id\n\n @rec_area_id.setter\n def rec_area_id(self, rec_area_id):\n \"\"\"Sets the rec_area_id of this RecreationArea.\n\n RIDB unique RecArea ID # noqa: E501\n\n :param rec_area_id: The rec_area_id of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_id is None:\n raise ValueError(\n \"Invalid value for `rec_area_id`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_id = rec_area_id\n\n @property\n def org_rec_area_id(self):\n \"\"\"Gets the org_rec_area_id of this RecreationArea. # noqa: E501\n\n The agency's internal RecArea ID provided to the RIDB by the agency # noqa: E501\n\n :return: The org_rec_area_id of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._org_rec_area_id\n\n @org_rec_area_id.setter\n def org_rec_area_id(self, org_rec_area_id):\n \"\"\"Sets the org_rec_area_id of this RecreationArea.\n\n The agency's internal RecArea ID provided to the RIDB by the agency # noqa: E501\n\n :param org_rec_area_id: The org_rec_area_id of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if org_rec_area_id is None:\n raise ValueError(\n \"Invalid value for `org_rec_area_id`, must not be `None`\"\n ) # noqa: E501\n\n self._org_rec_area_id = org_rec_area_id\n\n @property\n def parent_org_id(self):\n \"\"\"Gets the parent_org_id of this RecreationArea. # noqa: E501\n\n Parent Organization ID # noqa: E501\n\n :return: The parent_org_id of this RecreationArea. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._parent_org_id\n\n @parent_org_id.setter\n def parent_org_id(self, parent_org_id):\n \"\"\"Sets the parent_org_id of this RecreationArea.\n\n Parent Organization ID # noqa: E501\n\n :param parent_org_id: The parent_org_id of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n\n self._parent_org_id = parent_org_id\n\n @property\n def rec_area_name(self):\n \"\"\"Gets the rec_area_name of this RecreationArea. # noqa: E501\n\n Full name of the RecArea # noqa: E501\n\n :return: The rec_area_name of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_name\n\n @rec_area_name.setter\n def rec_area_name(self, rec_area_name):\n \"\"\"Sets the rec_area_name of this RecreationArea.\n\n Full name of the RecArea # noqa: E501\n\n :param rec_area_name: The rec_area_name of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_name is None:\n raise ValueError(\n \"Invalid value for `rec_area_name`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_name = rec_area_name\n\n @property\n def rec_area_description(self):\n \"\"\"Gets the rec_area_description of this RecreationArea. # noqa: E501\n\n Text that describes the RecArea # noqa: E501\n\n :return: The rec_area_description of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_description\n\n @rec_area_description.setter\n def rec_area_description(self, rec_area_description):\n \"\"\"Sets the rec_area_description of this RecreationArea.\n\n Text that describes the RecArea # noqa: E501\n\n :param rec_area_description: The rec_area_description of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_description is None:\n raise ValueError(\n \"Invalid value for `rec_area_description`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_description = rec_area_description\n\n @property\n def rec_area_fee_description(self):\n \"\"\"Gets the rec_area_fee_description of this RecreationArea. # noqa: E501\n\n Text describing monetary charges associated with entrance to or usage of a RecArea # noqa: E501\n\n :return: The rec_area_fee_description of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_fee_description\n\n @rec_area_fee_description.setter\n def rec_area_fee_description(self, rec_area_fee_description):\n \"\"\"Sets the rec_area_fee_description of this RecreationArea.\n\n Text describing monetary charges associated with entrance to or usage of a RecArea # noqa: E501\n\n :param rec_area_fee_description: The rec_area_fee_description of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_fee_description is None:\n raise ValueError(\n \"Invalid value for `rec_area_fee_description`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_fee_description = rec_area_fee_description\n\n @property\n def rec_area_directions(self):\n \"\"\"Gets the rec_area_directions of this RecreationArea. # noqa: E501\n\n Directions to the RecArea # noqa: E501\n\n :return: The rec_area_directions of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_directions\n\n @rec_area_directions.setter\n def rec_area_directions(self, rec_area_directions):\n \"\"\"Sets the rec_area_directions of this RecreationArea.\n\n Directions to the RecArea # noqa: E501\n\n :param rec_area_directions: The rec_area_directions of this RecreationArea. 
# noqa: E501\n :type: str\n \"\"\"\n if rec_area_directions is None:\n raise ValueError(\n \"Invalid value for `rec_area_directions`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_directions = rec_area_directions\n\n @property\n def rec_area_phone(self):\n \"\"\"Gets the rec_area_phone of this RecreationArea. # noqa: E501\n\n Phone number for RecArea # noqa: E501\n\n :return: The rec_area_phone of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_phone\n\n @rec_area_phone.setter\n def rec_area_phone(self, rec_area_phone):\n \"\"\"Sets the rec_area_phone of this RecreationArea.\n\n Phone number for RecArea # noqa: E501\n\n :param rec_area_phone: The rec_area_phone of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_phone is None:\n raise ValueError(\n \"Invalid value for `rec_area_phone`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_phone = rec_area_phone\n\n @property\n def rec_area_email(self):\n \"\"\"Gets the rec_area_email of this RecreationArea. # noqa: E501\n\n Email address of the RecArea # noqa: E501\n\n :return: The rec_area_email of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_email\n\n @rec_area_email.setter\n def rec_area_email(self, rec_area_email):\n \"\"\"Sets the rec_area_email of this RecreationArea.\n\n Email address of the RecArea # noqa: E501\n\n :param rec_area_email: The rec_area_email of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_email is None:\n raise ValueError(\n \"Invalid value for `rec_area_email`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_email = rec_area_email\n\n @property\n def rec_area_reservation_url(self):\n \"\"\"Gets the rec_area_reservation_url of this RecreationArea. # noqa: E501\n\n Internet address (URL) for the web site hosting the reservation system # noqa: E501\n\n :return: The rec_area_reservation_url of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_reservation_url\n\n @rec_area_reservation_url.setter\n def rec_area_reservation_url(self, rec_area_reservation_url):\n \"\"\"Sets the rec_area_reservation_url of this RecreationArea.\n\n Internet address (URL) for the web site hosting the reservation system # noqa: E501\n\n :param rec_area_reservation_url: The rec_area_reservation_url of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_reservation_url is None:\n raise ValueError(\n \"Invalid value for `rec_area_reservation_url`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_reservation_url = rec_area_reservation_url\n\n @property\n def rec_area_map_url(self):\n \"\"\"Gets the rec_area_map_url of this RecreationArea. # noqa: E501\n\n Internet address (URL) that hosts the RecArea map # noqa: E501\n\n :return: The rec_area_map_url of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._rec_area_map_url\n\n @rec_area_map_url.setter\n def rec_area_map_url(self, rec_area_map_url):\n \"\"\"Sets the rec_area_map_url of this RecreationArea.\n\n Internet address (URL) that hosts the RecArea map # noqa: E501\n\n :param rec_area_map_url: The rec_area_map_url of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if rec_area_map_url is None:\n raise ValueError(\n \"Invalid value for `rec_area_map_url`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_map_url = rec_area_map_url\n\n @property\n def geojson(self):\n \"\"\"Gets the geojson of this RecreationArea. 
# noqa: E501\n\n\n :return: The geojson of this RecreationArea. # noqa: E501\n :rtype: RecreationAreaGEOJSON\n \"\"\"\n return self._geojson\n\n @geojson.setter\n def geojson(self, geojson):\n \"\"\"Sets the geojson of this RecreationArea.\n\n\n :param geojson: The geojson of this RecreationArea. # noqa: E501\n :type: RecreationAreaGEOJSON\n \"\"\"\n if geojson is None:\n raise ValueError(\n \"Invalid value for `geojson`, must not be `None`\"\n ) # noqa: E501\n\n self._geojson = geojson\n\n @property\n def rec_area_longitude(self):\n \"\"\"Gets the rec_area_longitude of this RecreationArea. # noqa: E501\n\n Longitude in decimal degrees -180.0 to 180.0 # noqa: E501\n\n :return: The rec_area_longitude of this RecreationArea. # noqa: E501\n :rtype: float\n \"\"\"\n return self._rec_area_longitude\n\n @rec_area_longitude.setter\n def rec_area_longitude(self, rec_area_longitude):\n \"\"\"Sets the rec_area_longitude of this RecreationArea.\n\n Longitude in decimal degrees -180.0 to 180.0 # noqa: E501\n\n :param rec_area_longitude: The rec_area_longitude of this RecreationArea. # noqa: E501\n :type: float\n \"\"\"\n if rec_area_longitude is None:\n raise ValueError(\n \"Invalid value for `rec_area_longitude`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_longitude = rec_area_longitude\n\n @property\n def rec_area_latitude(self):\n \"\"\"Gets the rec_area_latitude of this RecreationArea. # noqa: E501\n\n Latitude in decimal degrees -90.0 to 90.0 # noqa: E501\n\n :return: The rec_area_latitude of this RecreationArea. # noqa: E501\n :rtype: float\n \"\"\"\n return self._rec_area_latitude\n\n @rec_area_latitude.setter\n def rec_area_latitude(self, rec_area_latitude):\n \"\"\"Sets the rec_area_latitude of this RecreationArea.\n\n Latitude in decimal degrees -90.0 to 90.0 # noqa: E501\n\n :param rec_area_latitude: The rec_area_latitude of this RecreationArea. # noqa: E501\n :type: float\n \"\"\"\n if rec_area_latitude is None:\n raise ValueError(\n \"Invalid value for `rec_area_latitude`, must not be `None`\"\n ) # noqa: E501\n\n self._rec_area_latitude = rec_area_latitude\n\n @property\n def stay_limit(self):\n \"\"\"Gets the stay_limit of this RecreationArea. # noqa: E501\n\n Details on the stay limits for the RecArea # noqa: E501\n\n :return: The stay_limit of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._stay_limit\n\n @stay_limit.setter\n def stay_limit(self, stay_limit):\n \"\"\"Sets the stay_limit of this RecreationArea.\n\n Details on the stay limits for the RecArea # noqa: E501\n\n :param stay_limit: The stay_limit of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if stay_limit is None:\n raise ValueError(\n \"Invalid value for `stay_limit`, must not be `None`\"\n ) # noqa: E501\n\n self._stay_limit = stay_limit\n\n @property\n def keywords(self):\n \"\"\"Gets the keywords of this RecreationArea. # noqa: E501\n\n List of keywords for the RecArea # noqa: E501\n\n :return: The keywords of this RecreationArea. # noqa: E501\n :rtype: str\n \"\"\"\n return self._keywords\n\n @keywords.setter\n def keywords(self, keywords):\n \"\"\"Sets the keywords of this RecreationArea.\n\n List of keywords for the RecArea # noqa: E501\n\n :param keywords: The keywords of this RecreationArea. # noqa: E501\n :type: str\n \"\"\"\n if keywords is None:\n raise ValueError(\n \"Invalid value for `keywords`, must not be `None`\"\n ) # noqa: E501\n\n self._keywords = keywords\n\n @property\n def reservable(self):\n \"\"\"Gets the reservable of this RecreationArea. 
# noqa: E501\n\n Whether the RecArea is reservable # noqa: E501\n\n :return: The reservable of this RecreationArea. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._reservable\n\n @reservable.setter\n def reservable(self, reservable):\n \"\"\"Sets the reservable of this RecreationArea.\n\n Whether the RecArea is reservable # noqa: E501\n\n :param reservable: The reservable of this RecreationArea. # noqa: E501\n :type: bool\n \"\"\"\n if reservable is None:\n raise ValueError(\n \"Invalid value for `reservable`, must not be `None`\"\n ) # noqa: E501\n\n self._reservable = reservable\n\n @property\n def enabled(self):\n \"\"\"Gets the enabled of this RecreationArea. # noqa: E501\n\n Whether the RecArea is enabled # noqa: E501\n\n :return: The enabled of this RecreationArea. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._enabled\n\n @enabled.setter\n def enabled(self, enabled):\n \"\"\"Sets the enabled of this RecreationArea.\n\n Whether the RecArea is enabled # noqa: E501\n\n :param enabled: The enabled of this RecreationArea. # noqa: E501\n :type: bool\n \"\"\"\n if enabled is None:\n raise ValueError(\n \"Invalid value for `enabled`, must not be `None`\"\n ) # noqa: E501\n\n self._enabled = enabled\n\n @property\n def last_updated_date(self):\n \"\"\"Gets the last_updated_date of this RecreationArea. # noqa: E501\n\n Record last update date # noqa: E501\n\n :return: The last_updated_date of this RecreationArea. # noqa: E501\n :rtype: date\n \"\"\"\n return self._last_updated_date\n\n @last_updated_date.setter\n def last_updated_date(self, last_updated_date):\n \"\"\"Sets the last_updated_date of this RecreationArea.\n\n Record last update date # noqa: E501\n\n :param last_updated_date: The last_updated_date of this RecreationArea. # noqa: E501\n :type: date\n \"\"\"\n if last_updated_date is None:\n raise ValueError(\n \"Invalid value for `last_updated_date`, must not be `None`\"\n ) # noqa: E501\n\n self._last_updated_date = last_updated_date\n\n @property\n def organization(self):\n \"\"\"Gets the organization of this RecreationArea. # noqa: E501\n\n\n :return: The organization of this RecreationArea. # noqa: E501\n :rtype: list[Organization]\n \"\"\"\n return self._organization\n\n @organization.setter\n def organization(self, organization):\n \"\"\"Sets the organization of this RecreationArea.\n\n\n :param organization: The organization of this RecreationArea. # noqa: E501\n :type: list[Organization]\n \"\"\"\n\n self._organization = organization\n\n @property\n def facility(self):\n \"\"\"Gets the facility of this RecreationArea. # noqa: E501\n\n\n :return: The facility of this RecreationArea. # noqa: E501\n :rtype: list[RecreationAreaFacility]\n \"\"\"\n return self._facility\n\n @facility.setter\n def facility(self, facility):\n \"\"\"Sets the facility of this RecreationArea.\n\n\n :param facility: The facility of this RecreationArea. # noqa: E501\n :type: list[RecreationAreaFacility]\n \"\"\"\n\n self._facility = facility\n\n @property\n def recareaaddress(self):\n \"\"\"Gets the recareaaddress of this RecreationArea. # noqa: E501\n\n\n :return: The recareaaddress of this RecreationArea. # noqa: E501\n :rtype: list[RecreationAreaAddress]\n \"\"\"\n return self._recareaaddress\n\n @recareaaddress.setter\n def recareaaddress(self, recareaaddress):\n \"\"\"Sets the recareaaddress of this RecreationArea.\n\n\n :param recareaaddress: The recareaaddress of this RecreationArea. 
# noqa: E501\n :type: list[RecreationAreaAddress]\n \"\"\"\n\n self._recareaaddress = recareaaddress\n\n @property\n def activity(self):\n \"\"\"Gets the activity of this RecreationArea. # noqa: E501\n\n\n :return: The activity of this RecreationArea. # noqa: E501\n :rtype: list[RecreationAreaActivity]\n \"\"\"\n return self._activity\n\n @activity.setter\n def activity(self, activity):\n \"\"\"Sets the activity of this RecreationArea.\n\n\n :param activity: The activity of this RecreationArea. # noqa: E501\n :type: list[RecreationAreaActivity]\n \"\"\"\n\n self._activity = activity\n\n @property\n def event(self):\n \"\"\"Gets the event of this RecreationArea. # noqa: E501\n\n\n :return: The event of this RecreationArea. # noqa: E501\n :rtype: list[Event]\n \"\"\"\n return self._event\n\n @event.setter\n def event(self, event):\n \"\"\"Sets the event of this RecreationArea.\n\n\n :param event: The event of this RecreationArea. # noqa: E501\n :type: list[Event]\n \"\"\"\n\n self._event = event\n\n @property\n def media(self):\n \"\"\"Gets the media of this RecreationArea. # noqa: E501\n\n\n :return: The media of this RecreationArea. # noqa: E501\n :rtype: list[Media]\n \"\"\"\n return self._media\n\n @media.setter\n def media(self, media):\n \"\"\"Sets the media of this RecreationArea.\n\n\n :param media: The media of this RecreationArea. # noqa: E501\n :type: list[Media]\n \"\"\"\n\n self._media = media\n\n @property\n def link(self):\n \"\"\"Gets the link of this RecreationArea. # noqa: E501\n\n\n :return: The link of this RecreationArea. # noqa: E501\n :rtype: list[Link]\n \"\"\"\n return self._link\n\n @link.setter\n def link(self, link):\n \"\"\"Sets the link of this RecreationArea.\n\n\n :param link: The link of this RecreationArea. # noqa: E501\n :type: list[Link]\n \"\"\"\n\n self._link = link\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(\n map(lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x, value)\n )\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(\n map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\")\n else item,\n value.items(),\n )\n )\n else:\n result[attr] = value\n if issubclass(RecreationArea, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RecreationArea):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"juftin/recdotgov-client","sub_path":"recdotgov_client/models/recreation_area.py","file_name":"recreation_area.py","file_ext":"py","file_size_in_byte":27742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"40846985311","text":"import io\nimport setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nmeta = {}\nwith io.open('./src/todo_or_die/version.py', encoding='utf-8') as f:\n 
exec(f.read(), meta)\n\nsetuptools.setup(\n name=\"todo-or-die-python\",\n version=meta['__version__'],\n author=\"Brandon Walsh\",\n author_email=\"bmwalshy@gmail.com\",\n description=\"Halt code if your TODO is passed due\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/walshyb/todo-or-die-python\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/walshyb/todo-or-die-python/issues\",\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.6\",\n)","repo_name":"walshyb/todo-or-die-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"9"} +{"seq_id":"12634690408","text":"from os import path\nfrom premiumFinance.settings import PROJECT_ROOT\n\nVBT_UNISMOKE_URL = \"https://www.soa.org/globalassets/assets/files/research/exp-study/2015-vbt-unismoke-alb-anb.xlsx\"\nVBT_SMOKEDISTINCT_URL = \"https://www.soa.org/globalassets/assets/files/research/exp-study/2015-vbt-smoker-distinct-alb-anb.xlsx\"\nPERSIST_URL = \"https://www.soa.org/globalassets/assets/files/resources/research-report/2019/2009-13-us-ind-life-persistency-excel.xlsx\"\n\nMORT_URL = \"http://cdn-files.soa.org/research/2009-15_Data_20180601.zip\"\nYIELD_URL = \"https://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData\"\nDATA_FOLDER = path.join(PROJECT_ROOT, \"data\")\nFIGURE_FOLDER = path.join(PROJECT_ROOT, \"figures\")\n\nMORTALITY_TABLE_CLEANED_PATH = path.join(DATA_FOLDER, \"mortality_experience_clean.xlsx\")\nPROCESSED_PROFITABILITY_PATH = path.join(DATA_FOLDER, \"profitability.json\")\n\nNAIC_PATH = path.join(DATA_FOLDER, \"NAIC_1996_2020_SPGlobalofficeworkbook.xls\")\n\nYIELD_DURATION = {\n \"1MONTH\": 1 / 12,\n \"2MONTH\": 2 / 12,\n \"3MONTH\": 3 / 12,\n \"6MONTH\": 6 / 12,\n \"1YEAR\": 1,\n \"2YEAR\": 2,\n \"3YEAR\": 3,\n \"5YEAR\": 5,\n \"7YEAR\": 7,\n \"10YEAR\": 10,\n \"20YEAR\": 20,\n \"30YEAR\": 30,\n}\n\nFIN_OPTIONS = [\"lapse\", \"nonrecourse\", \"fullrecourse\", \"sale\"]\n\nVBT_TABLES = {\n \"VBT01\": {\n \"m\": {\"unism\": 1148, \"nonsm\": 1149, \"smoke\": 1150},\n \"f\": {\"unism\": 1151, \"nonsm\": 1152, \"smoke\": 1153},\n },\n \"VBT08\": {\"m\": {\"nonsm\": 1003, \"smoke\": 1005}, \"f\": {\"nonsm\": 997, \"smoke\": 999}},\n \"VBT15\": {\n \"m\": {\"unism\": 3273, \"nonsm\": 3265, \"smoke\": 3267},\n \"f\": {\"unism\": 3274, \"nonsm\": 3266, \"smoke\": 3268},\n },\n}\n\n\nAGE_BIN = [\n \"< 25\",\n \"25-34\",\n \"35-44\",\n \"45-54\",\n \"55-64\",\n \"65-74\",\n \">= 75\",\n]\n\nDATE_ID = {\n \"Expenditures_Healthcare\": {\n AGE_BIN[0]: \"CXUHEALTHLB0402M\",\n AGE_BIN[1]: \"CXUHEALTHLB0403M\",\n AGE_BIN[2]: \"CXUHEALTHLB0404M\",\n AGE_BIN[3]: \"CXUHEALTHLB0405M\",\n AGE_BIN[4]: \"CXUHEALTHLB0406M\",\n # \"65_or_Over\": \"CXUHEALTHLB0407M\",\n AGE_BIN[5]: \"CXUHEALTHLB0408M\",\n AGE_BIN[6]: \"CXUHEALTHLB0409M\",\n },\n \"Income_After_Taxes\": {\n AGE_BIN[0]: \"CXUINCAFTAXLB0402M\",\n AGE_BIN[1]: \"CXUINCAFTAXLB0403M\",\n AGE_BIN[2]: \"CXUINCAFTAXLB0404M\",\n AGE_BIN[3]: \"CXUINCAFTAXLB0405M\",\n AGE_BIN[4]: \"CXUINCAFTAXLB0406M\",\n # \"65_or_Over\": \"CXUINCAFTAXLB0407M\",\n AGE_BIN[5]: \"CXUINCAFTAXLB0408M\",\n AGE_BIN[6]: \"CXUINCAFTAXLB0409M\",\n },\n}\n\nFRED_URL_ROOT = 
\"https://fred.stlouisfed.org/graph/fredgraph.csv?id=\"\n\n# 2009-2013 Individual Life Insurance Mortality Experience Report\n# https://www.soa.org/resources/experience-studies/2017/2009-13-indiv-life-ins-mort-exp/\n","repo_name":"SFlashYang/premfin","sub_path":"premiumFinance/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"9"} +{"seq_id":"6364943153","text":"\"\"\"\nAdapted from https://github.com/rcmalli/keras-squeezenet/blob/master/keras_squeezenet/squeezenet.py\n\"\"\"\n\nfrom keras.models import Model\nfrom keras.layers import Input, Convolution2D, Activation, MaxPooling2D, GlobalAveragePooling2D, Dropout, concatenate\nfrom keras.utils import get_file\n\nWEIGHTS_PATH_NO_TOP = \"https://github.com/rcmalli/keras-squeezenet/releases/download/v1.0/squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5\"\n\ndef fire_module(x, fire_id, squeeze=16, expand=64):\n fire_id = 'fire' + str(fire_id) + '/'\n x = Convolution2D(squeeze, (1, 1), padding='valid', name=fire_id + 'squeeze1x1')(x)\n x = Activation('relu', name=fire_id + 'relu_squeeze1x1')(x)\n \n left = Convolution2D(expand, (1, 1), padding='valid', name=fire_id + 'expand1x1')(x)\n left = Activation('relu', name=fire_id + 'relu_expand1x1')(left)\n \n right = Convolution2D(expand, (3, 3), padding='same', name=fire_id + 'expand3x3')(x)\n right = Activation('relu', name=fire_id + 'relu_expand3x3')(right)\n \n x = concatenate([left, right], axis=3, name=fire_id + 'concat')\n \n return x\n\ndef SqueezeNet(input_shape, weights='imagenet'):\n inputs = Input(shape=input_shape)\n \n x = Convolution2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1')(inputs)\n x = Activation('relu', name='relu_conv1')(x)\n x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)\n\n x = fire_module(x, fire_id=2, squeeze=16, expand=64)\n x = fire_module(x, fire_id=3, squeeze=16, expand=64)\n x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)\n\n x = fire_module(x, fire_id=4, squeeze=32, expand=128)\n x = fire_module(x, fire_id=5, squeeze=32, expand=128)\n x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)\n\n x = fire_module(x, fire_id=6, squeeze=48, expand=192)\n x = fire_module(x, fire_id=7, squeeze=48, expand=192)\n x = fire_module(x, fire_id=8, squeeze=64, expand=256)\n x = fire_module(x, fire_id=9, squeeze=64, expand=256)\n\n model = Model(inputs, x, name='squeezenet')\n \n if weights == 'base_model':\n weights_path = 'base_model.h5'\n elif weights == 'imagenet':\n weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models')\n model.load_weights(weights_path)\n elif weights is not None:\n raise ValueError('Unknown `weights` argument')\n \n \n return model","repo_name":"maxemerling/COVID_CT","sub_path":"keras_squeezenet_old.py","file_name":"keras_squeezenet_old.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"22810018744","text":"class Solution:\n def numDecodings(self, s: str) -> int:\n # Initialize an array to store the number of ways to decode the string\n # ending at each index\n dp = [0] * (len(s) + 1)\n\n # Base case: there is only one way to decode an empty string\n dp[0] = 1\n\n # Iterate through the string, starting from the first character\n for i in range(1, len(s) + 1):\n # If the current character is 
non-zero, then it can be decoded as a\n # single character, so add the number of ways to decode the previous\n # substring to the current number of ways\n if s[i - 1] != '0':\n dp[i] += dp[i - 1]\n\n # If the current character and the previous character form a valid\n # two-digit number between 10 and 26, then those two characters can\n # be decoded as a single number, so add the number of ways to decode\n # the substring ending two characters before to the current number of\n # ways\n if i > 1 and 10 <= int(s[i - 2:i]) <= 26:\n dp[i] += dp[i - 2]\n\n # Return the number of ways to decode the entire string\n return dp[len(s)]\n","repo_name":"Ajay122002/Leetcode-Solutions","sub_path":"0091-decode-ways/0091-decode-ways.py","file_name":"0091-decode-ways.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"23971367229","text":"from django.urls import path\nfrom voting.api.v1.views import (\n RestaurantListCreateAPIView, RestaurantRUDAPIView,\n MenuListCreateAPIView, MenuRUDAPIView, VoteListCreateAPIView,\n VoteRUDAPIView, ResultAPIView, PublishResultAPIView\n)\n\n\napp_name = 'voting-api-v1'\n\nurlpatterns = [\n path(\n 'restaurants/',\n RestaurantListCreateAPIView.as_view(),\n name='restaurant-list-create'\n ),\n path(\n 'restaurants//',\n RestaurantRUDAPIView.as_view(),\n name='restaurant-rud'\n ),\n path('menus/', MenuListCreateAPIView.as_view(), name='menu-list-create'),\n path('menus//', MenuRUDAPIView.as_view(), name='menu-rud'),\n path('votes/', VoteListCreateAPIView.as_view(), name='vote-list-create'),\n path('votes//', VoteRUDAPIView.as_view(), name='vote-rud'),\n path('result/', ResultAPIView.as_view(), name='result'),\n path(\n 'publish-result/',\n PublishResultAPIView.as_view(),\n name='publish-result'\n ),\n]\n","repo_name":"ShehabAhmedSayem/Restaurant_Voting_System","sub_path":"voting/api/v1/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"31659336038","text":"from os.path import join as opj\nimport os\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom bids import BIDSLayout\nimport ptitprince as pt\nfrom scipy.io import loadmat\n###############################\n# Parameters\n###############################\nlayout = BIDSLayout('/data/source')\npart = ['sub-' + s for s in layout.get_subject()]\n\n# Remove stupid pandas warning\npd.options.mode.chained_assignment = None # default='warn'\n\n# Outpath for analysis\noutpath = '/data/derivatives/compmodels'\n# Outpath for figures\noutfigpath = '/data/derivatives/figures/compmodels'\nif not os.path.exists(outfigpath):\n os.mkdir(outfigpath)\n\nparam = {\n # Font sizez in plot\n 'titlefontsize': 24,\n 'labelfontsize': 24,\n 'ticksfontsize': 22,\n 'legendfontsize': 20,\n }\n\n# Despine\nplt.rc(\"axes.spines\", top=False, right=False)\nplt.rcParams['font.family'] = 'Arial Narrow'\n\n# ################################################################\n# Figure X SCR plot\n#################################################################\n# A) SCR raw/cond/Block\n# B) SCR predicted/cond/block\n# C) Trialwise SCR pred vs actual\n# D) Trialwise expected vs actual probability\n\n# Winning model\nwin = 'HGF2_intercue'\n\n# Load data\ndata = pd.read_csv(opj(outpath, win, win + '_data.csv'))\n\n# Remove shocks\ndata_ns = 
data.copy()\ndata_ns = data[data['cond'] != 'CS++']\n\n# Get average SCR/cond/block\ndata_avg_all = data_ns.groupby(['cond_plot',\n 'block'])['scr',\n 'pred'].mean().reset_index()\n\n# Get SD\ndata_se_all = data_ns.groupby(['cond_plot',\n 'block'])['scr',\n 'pred'].std().reset_index()\n# Divide by sqrt(n)\ndata_se_all.scr = data_se_all.scr / np.sqrt(len(set(data_ns['sub'])))\ndata_se_all.pred = data_se_all.pred / np.sqrt(len(set(data_ns['sub'])))\n\n\n# Init figure\nfig, ax = plt.subplots(figsize=(8, 5))\n\noff = 0.1 # Dots offset to avoid overlap\n\nfor cond in data_avg_all['cond_plot']:\n if cond[0:3] == 'CS+':\n label = 'CS+ / CSE'\n marker = 'o'\n color = \"#C44E52\"\n linestyle = '-'\n condoff = 0.05\n else:\n label = 'CS-1 / CS-2'\n marker = '^'\n color = '#4C72B0'\n linestyle = '--'\n condoff = -0.025\n dat_plot = data_avg_all[data_avg_all.cond_plot == cond].reset_index()\n dat_plot_se = data_se_all[data_se_all.cond_plot == cond]\n\n # len(dat_plot)\n if len(dat_plot) > 1:\n ax.errorbar(x=[dat_plot.block[0] + off, dat_plot.block[1] + condoff],\n y=dat_plot.scr,\n yerr=dat_plot_se.scr, label=label,\n marker=marker, color=color, ecolor=color,\n linestyle=linestyle, markersize=8, linewidth=2)\n else:\n ax.errorbar(x=[dat_plot.block[0] - off],\n y=dat_plot.scr,\n yerr=dat_plot_se.scr, label=label,\n marker=marker, color=color, ecolor=color,\n linestyle=linestyle, markersize=8, linewidth=2)\n\nfor line in [1.5, 2.5, 3.5, 4.5, 5.5, 6.5]:\n ax.axvline(x=line, linestyle=':', color='k', alpha=0.5)\nax.set_ylabel('SCR (beta estimate)', fontsize=param['labelfontsize'])\nax.set_xlabel('Block', fontsize=param['labelfontsize'])\n# ax1[0].set_ylim([0.1, 0.26])\nax.tick_params(labelsize=param['ticksfontsize'])\nhandles, labels = ax.get_legend_handles_labels()\nby_label = OrderedDict(zip(labels, handles))\nax.legend(by_label.values(), by_label.keys(),\n loc='best', fontsize=param[\"legendfontsize\"], frameon=False)\n\nfig.tight_layout()\nfig.savefig(opj(outfigpath, 'scr_average.svg'), dpi=600, bbox_inches='tight')\n\n\nfig, ax = plt.subplots(figsize=(8, 5))\n\n# SAME WITH PRED\n# Init figure\nfor cond in data_avg_all['cond_plot']:\n if cond[0:3] == 'CS+':\n label = 'CS+ / CSE'\n marker = 'o'\n color = \"#C44E52\"\n linestyle = '-'\n condoff = 0.05\n else:\n label = 'CS-1 / CS-2'\n marker = '^'\n color = '#4C72B0'\n linestyle = '--'\n condoff = -0.025\n dat_plot = data_avg_all[data_avg_all.cond_plot == cond].reset_index()\n dat_plot_se = data_se_all[data_se_all.cond_plot == cond]\n\n # len(dat_plot)\n if len(dat_plot) > 1:\n ax.errorbar(x=[dat_plot.block[0] + off, dat_plot.block[1] + condoff],\n y=dat_plot.pred,\n yerr=dat_plot_se.pred, label=label,\n marker=marker, color=color, ecolor=color,\n linestyle=linestyle, markersize=8, linewidth=2)\n else:\n ax.errorbar(x=[dat_plot.block[0] - off],\n y=dat_plot.pred,\n yerr=dat_plot_se.pred, label=label,\n marker=marker, color=color, ecolor=color,\n linestyle=linestyle, markersize=8, linewidth=2)\nfor line in [1.5, 2.5, 3.5, 4.5, 5.5, 6.5]:\n ax.axvline(x=line, linestyle=':', color='k', alpha=0.5)\nax.set_ylabel('Predicted SCR', fontsize=param['labelfontsize'])\nax.set_xlabel('Block', fontsize=param['labelfontsize'])\n# ax1[1].set_ylim([0.1, 0.26])\nax.tick_params(labelsize=param['ticksfontsize'])\n\nfig.tight_layout()\nfig.savefig(opj(outfigpath, 'pred_scr_average.svg'), dpi=600,\n bbox_inches='tight')\n\n\nfig, ax = plt.subplots(figsize=(8, 5))\n\n# Actual vs predicted /trial\ndeep_pal = sns.color_palette('deep')\n\ndata_ns['cond2'] = 0\ndata_ns['cond2'] 
= np.where(data_ns['cond'] == 'CS++',\n \"CS+\", data_ns['cond2'])\ndata_ns['cond2'] = np.where(data_ns['cond'] == 'CS-1',\n 'CS-1', data_ns['cond2'])\ndata_ns['cond2'] = np.where(data_ns['cond'] == 'CS-2',\n 'CS-2', data_ns['cond2'])\ndata_ns['cond2'] = np.where(data_ns['cond'] == 'CS+',\n \"CS+\", data_ns['cond2'])\n\ndata_ns['cond2'] = np.where(data_ns['cond'] == 'CS-E',\n \"CS-E\", data_ns['cond2'])\n\ndata_avg_all = data_ns.groupby(['block',\n 'trial_within_wb',\n 'cond'])['scr', 'pred',\n 'vhat'].mean().reset_index()\n\n\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS-1'],\n y=data_avg_all.scr[data_avg_all.cond == 'CS-1'],\n facecolors='none',\n color='#4C72B0',\n alpha=1,\n label='CS-1')\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS-2'],\n y=data_avg_all.scr[data_avg_all.cond == 'CS-2'],\n facecolors='none',\n color='#0d264f',\n alpha=1,\n label='CS-2')\n\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS+'],\n y=data_avg_all.scr[data_avg_all.cond == 'CS+'],\n label='CS+',\n facecolors='none',\n color=\"#C44E52\",\n alpha=1)\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS-E'],\n y=data_avg_all.scr[data_avg_all.cond == 'CS-E'],\n label='CS-E',\n facecolors='none',\n color=\"#55A868\",\n alpha=1)\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS-1'],\n y=data_avg_all.pred[data_avg_all.cond == 'CS-1'],\n color='#4C72B0',\n alpha=0.8,\n label='CS-1')\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS-2'],\n y=data_avg_all.pred[data_avg_all.cond == 'CS-2'],\n color='#0d264f',\n alpha=0.8,\n label='CS-2')\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS+'],\n y=data_avg_all.pred[data_avg_all.cond == 'CS+'],\n color=\"#C44E52\",\n alpha=0.8,\n label='CS+')\nax.scatter(x=data_avg_all.trial_within_wb[data_avg_all.cond == 'CS-E'],\n y=data_avg_all.pred[data_avg_all.cond == 'CS-E'],\n color=\"#55A868\",\n alpha=0.8,\n label='CS-E')\n\n\n# Find trials where new block begins\nlines = []\nfor idx in range((len(data_avg_all.block) - 1)):\n if data_avg_all.block[idx + 1] != data_avg_all.block[idx]:\n lines.append(data_avg_all.trial_within_wb[idx] + 0.5)\n\nfor line in lines:\n ax.axvline(x=line, linestyle=':', color='k', alpha=0.5)\n\n\nax.set_ylabel('Actual / Predicted SCR', fontsize=param['labelfontsize'])\nax.set_xlabel('Trials within condition and block',\n fontsize=param['labelfontsize'])\n\nax.tick_params(labelsize=param['ticksfontsize'])\nhandles, labels = ax.get_legend_handles_labels()\nby_label = OrderedDict(zip(labels, handles))\nax.legend(by_label.values(), by_label.keys(),\n loc='upper left', fontsize=param['legendfontsize']-6, frameon=True)\n\nfig.tight_layout()\nfig.savefig(opj(outfigpath, 'pred_scr_bytrial.svg'), dpi=600,\n bbox_inches='tight')\n\n\n# Estimated quantities throught time\ndata_ns['cond2'] = 0\ndata_ns['cond2'] = np.where(data_ns['cond'] == 'CS++',\n \"CS+\", data_ns['cond'])\n\n\ndata_avg_all = data_ns.groupby(['block',\n 'trial_within_wb_wcs',\n 'cond_plot2',\n 'cond2'])['scr',\n 'pred',\n 'sa1hat',\n 'sa2hat',\n 'vhat'].mean().reset_index()\n\nxlabels = [r'Expected value $(\\hat{\\mu}_1)$',\n r'Irreducible uncertainty $(\\hat{\\sigma}_1)$',\n r'Estimation uncertainty $(\\hat{\\sigma}_2)$']\nfor ucue in data_avg_all['cond_plot2'].unique():\n selected = data_avg_all[data_avg_all.cond_plot2 == ucue].reset_index()\n\nfor idx, to_plot in enumerate(['vhat', 'sa1hat', 'sa2hat']):\n fig, ax = plt.subplots(figsize=(8, 5))\n for ucue in 
data_avg_all['cond_plot2'].unique():\n selected = data_avg_all[data_avg_all.cond_plot2 == ucue].reset_index()\n\n if selected.cond_plot2.loc[0][0:3] == 'CS-':\n color1 = '#4C72B0'\n color2 = '#0d264f'\n leg1 = 'CS-1'\n leg2 = 'CS-2'\n else:\n color1 = '#c44e52'\n color2 = '#55a868'\n leg1 = 'CS+'\n leg2 = 'CS-E'\n\n sns.lineplot(x=selected.trial_within_wb_wcs,\n y=selected[to_plot],\n color=color1,\n alpha=1,\n ax=ax,\n label=leg1)\n\n if selected.block.unique().shape[0] > 1:\n\n selected2 = selected[selected.block\n == selected.block.unique()[1]]\n sns.lineplot(x=selected2.trial_within_wb_wcs,\n y=selected2[to_plot],\n color=color2,\n alpha=1,\n ax=ax,\n label=leg2)\n\n ax.set_ylabel(xlabels[idx], fontsize=param['labelfontsize'])\n ax.set_xlabel('Trials', fontsize=param['labelfontsize'])\n\n ax.tick_params(labelsize=param['ticksfontsize'])\n handles, labels = ax.get_legend_handles_labels()\n by_label = OrderedDict(zip(labels, handles))\n ax.legend(by_label.values(), by_label.keys(),\n loc='best', fontsize=param[\"legendfontsize\"]-6, frameon=True)\n\n for line in lines:\n ax.axvline(x=line, linestyle=':', color='k', alpha=0.5)\n\n fig.tight_layout()\n fig.savefig(opj(outfigpath, 'traj_bytrial_' + to_plot + '.svg'), dpi=600,\n bbox_inches='tight')\n\n\n# ################################################################\n# Parameters plot\n##################################################################\n\nfig, ax = plt.subplots(1, 4, figsize=(8, 5))\npal = sns.color_palette(\"deep\", 5)\nlabels = [r'$\\omega_2$', r'$\\beta_0$', r'$\\beta_1$', r'$\\zeta$']\nfor idx, var in enumerate(['om_2', 'be0', 'be1', 'ze']):\n\n data_param = data.groupby(['sub'])[var].mean().reset_index()\n\n dplot = data_param.melt(['sub'])\n\n pt.half_violinplot(x='variable', y=\"value\", data=dplot, inner=None,\n jitter=True, color=pal[idx], lwidth=0, width=0.6,\n offset=0.17, cut=1, ax=ax[idx],\n linewidth=1, alpha=0.6, zorder=19)\n sns.stripplot(x='variable', y=\"value\", data=dplot,\n jitter=0.08, ax=ax[idx],\n linewidth=1, alpha=0.6, color=pal[idx], zorder=1)\n sns.boxplot(x='variable', y=\"value\", data=dplot,\n color=pal[idx], whis=np.inf, linewidth=1, ax=ax[idx],\n width=0.1, boxprops={\"zorder\": 10, 'alpha': 0.5},\n whiskerprops={'zorder': 10, 'alpha': 1},\n medianprops={'zorder': 11, 'alpha': 0.5})\n ax[idx].set_xticklabels([labels[idx]], fontsize=param['labelfontsize'])\n if idx == 0:\n ax[idx].set_ylabel('Value', fontsize=param['labelfontsize'])\n else:\n ax[idx].set_ylabel('')\n ax[idx].set_xlabel('')\n ax[idx].tick_params('y', labelsize=param['ticksfontsize']-4)\n ax[idx].tick_params('x', labelsize=param['ticksfontsize'])\n\n fig.tight_layout()\n fig.savefig(opj(outfigpath, 'model_parameters.svg'), dpi=600)\n\n\n# ################################################################\n# Model comparison plots\n##################################################################\n\n# Compare families\nfamcomp = loadmat(opj('/data/derivatives/compmodels/',\n 'compare_families_VBA_model_comp.mat'))\n\nmodnames = [str(m[0]) for m in famcomp['out']['options'][0][0][0][0][0][0]]\nmodnames = [m.replace('_nointercue', '\\ncue specific') for m in modnames]\nmodnames = [m.replace('_intercue', '\\ninter-cue') for m in modnames]\nmodnames.append('Family\\ncue specific')\nmodnames.append('Family\\ninter-cue')\n\n\nep = list(famcomp['out']['ep'][0][0][0])\nef = [float(ef)*100 for ef in famcomp['out']['Ef'][0][0]]\n\nef_fam = famcomp['out']['families'][0][0][0][0][4]\nep_fam = 
famcomp['out']['families'][0][0][0][0][6]\n\nep.append(ep_fam[0][0])\nep.append(ep_fam[0][1])\nef.append(float(ef_fam[0])*100)\nef.append(float(ef_fam[1])*100)\n\nmodnames = np.asarray(modnames)[np.asarray([0, 2, 4, 6, 1, 3, 5, 7, 8, 9])]\nep = np.asarray(ep)[np.asarray([0, 2, 4, 6, 1, 3, 5, 7, 8, 9])]\nef = np.asarray(ef)[np.asarray([0, 2, 4, 6, 1, 3, 5, 7, 8, 9])]\nfig, host = plt.subplots(figsize=(12, 5))\n\npar1 = host.twinx()\ncolor1 = '#7293cb'\ncolor2 = '#e1974c'\n\nx = np.arange(0.5, (len(ep))*0.75, 0.75)\nx2 = [c + 0.25 for c in x]\np1 = host.bar(x, ep, width=0.25, color=color1, linewidth=1, edgecolor='k')\np2 = par1.bar(x2, ef, width=0.25, color=color2, linewidth=1, edgecolor='k')\n\nhost.set_ylim(0, 1)\npar1.set_ylim(0, 100)\n\n\n# host.set_xlabel(\"Distance\")\nhost.set_ylabel(\"Exceedance probability\", fontsize=param[\"labelfontsize\"])\npar1.set_ylabel(\"Model Frequency (%)\", fontsize=param[\"labelfontsize\"])\n\n\nfor ax in [par1]:\n ax.set_frame_on(True)\n ax.patch.set_visible(False)\n\n plt.setp(ax.spines.values(), visible=False)\n ax.spines[\"right\"].set_visible(True)\n\nhost.yaxis.label.set_color(color1)\npar1.yaxis.label.set_color(color2)\n\nhost.spines[\"left\"].set_edgecolor(color1)\npar1.spines[\"right\"].set_edgecolor(color2)\nhost.axvline(3.25, linestyle='--', color='gray')\nhost.axvline(6.25, linestyle='--', color='gray')\n\nhost.set_xticks([i+0.125 for i in x])\nhost.set_xticklabels(modnames, size=param['ticksfontsize'])\n\nhost.tick_params(axis='x', labelsize=param['labelfontsize']-10)\n\nhost.tick_params(axis='y', colors=color1, labelsize=param['labelfontsize'])\npar1.tick_params(axis='y', colors=color2, labelsize=param['labelfontsize'])\nfig.tight_layout()\nfig.savefig(opj(outfigpath, 'model_comparison_families.svg'), dpi=600)\n\n\n# Compare intercues\nfamcomp = loadmat(opj('/data/derivatives/compmodels/',\n 'compare_intercues_VBA_model_comp.mat'))\n\nmodnames = [str(m[0]) for m in famcomp['out']['options'][0][0][0][0][0][0]]\nmodnames = [m.replace('_intercue', '') for m in modnames]\n\n\nep = famcomp['out']['ep'][0][0][0]\nef = [float(ef)*100 for ef in famcomp['out']['Ef'][0][0]]\n\nfig, host = plt.subplots(figsize=(8, 5))\n\npar1 = host.twinx()\ncolor1 = '#7293cb'\ncolor2 = '#e1974c'\n\nx = np.arange(0.5, (len(ep))*0.75, 0.75)\nx2 = [c + 0.25 for c in x]\np1 = host.bar(x, ep, width=0.25, color=color1, linewidth=1, edgecolor='k')\np2 = par1.bar(x2, ef, width=0.25, color=color2, linewidth=1, edgecolor='k')\n\nhost.set_ylim(0, 1)\npar1.set_ylim(0, 100)\n\n\n# host.set_xlabel(\"Distance\")\nhost.set_ylabel(\"Exceedance probability\", fontsize=param[\"labelfontsize\"])\npar1.set_ylabel(\"Model Frequency (%)\", fontsize=param[\"labelfontsize\"])\n\n\nfor ax in [par1]:\n ax.set_frame_on(True)\n ax.patch.set_visible(False)\n\n plt.setp(ax.spines.values(), visible=False)\n ax.spines[\"right\"].set_visible(True)\n\nhost.yaxis.label.set_color(color1)\npar1.yaxis.label.set_color(color2)\n\nhost.spines[\"left\"].set_edgecolor(color1)\npar1.spines[\"right\"].set_edgecolor(color2)\n\nhost.set_xticks([i+0.125 for i in x])\nhost.set_xticklabels(modnames, size=param['ticksfontsize'])\n\nhost.tick_params(axis='x', labelsize=param['labelfontsize'])\n\nhost.tick_params(axis='y', colors=color1, labelsize=param['labelfontsize'])\npar1.tick_params(axis='y', colors=color2, labelsize=param['labelfontsize'])\nfig.tight_layout()\nfig.savefig(opj(outfigpath, 'model_comparison_intercues.svg'), 
dpi=600)\n","repo_name":"Nian-Jingqing/painlearning","sub_path":"code/figures/figures_compmodels.py","file_name":"figures_compmodels.py","file_ext":"py","file_size_in_byte":17338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"18610931603","text":"import numpy as np\n\n# pub input example\npubInputSample1 = 'A 1 1 1'\npubInputSample2 = 'A 2 2 2'\npubInputSample3 = 'b 1 1 1'\npubInputSamples = [pubInputSample1, pubInputSample2, pubInputSample3]\n\n# create defaults\nA = np.array([[1, 1], [2, 2]])\nb = np.array([1, 1])\n\n\ndef string2array(pubInputString):\n sel_matrix = pubInputString[0] # select matrix to manipulate\n row = int(pubInputString[2])\n element1 = int(pubInputString[4])\n element2 = int(pubInputString[6])\n # print(sel_matrix, row, element1, element2)\n\n return sel_matrix, row, element1, element2\n\n\ndef solver(sel_matrix, row, element1, element2):\n global A, b\n\n # manipulate matrices\n if sel_matrix == 'A':\n A[row-1, :] = np.array([element1, element2])\n elif sel_matrix == 'b':\n b = np.array([element1, element2])\n\n #print('A: ', A)\n #print('b: ', b)\n\n x = np.matmul(A, b.T)\n\n return x\n\n\ndef returntoPub(x):\n xString = f'x {x[0]} {x[1]}'\n return xString\n\n\ndef processInput(sample):\n sel_matrix, row, element1, element2 = string2array(sample)\n x = solver(sel_matrix, row, element1, element2)\n xString = returntoPub(x)\n return xString\n\n\ni = 1\n# perform local test\nfor sample in pubInputSamples:\n print(f'case {i}: ')\n xString = processInput(sample)\n print('xString: ', xString)\n print('-----------')\n i += 1\n","repo_name":"fearmansighah/l5","sub_path":"processData.py","file_name":"processData.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"28301132051","text":"#! 
/usr/bin/env python\n# vim: set fileencoding=utf-8 :\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nusage = \"\"\"\\\nBuilds a matrix of 3d rgb histogram distances for all images in the\ndirectory passed in.\n\"\"\"\nimport argparse\nimport os\nimport logging\nimport glob\nimport csv\n\nimport numpy as np\nimport cv2\n\nparser = argparse.ArgumentParser(\n description=usage,\n formatter_class=argparse.RawDescriptionHelpFormatter)\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\",\n default=True, help=\"be more verbose\")\ngroup.add_argument(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\",\n default=True, help=\"be quiet\")\nparser.add_argument(\"-b\", \"--bins\", dest=\"bins\", type=int, default=8,\n help=\"number of bins across each dimension in histogram\")\nparser.add_argument(\"image_directory\", help=\"directory of images\")\nparser.add_argument(\"outfile\", type=argparse.FileType('w'),\n default=\"distances.csv\", help=\"CSV output file\")\n\nargs = parser.parse_args()\n\nlogger = logging.getLogger('root')\nif args.verbose:\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(name)-12s: %(levelname)-8s %(message)s\")\n console.setFormatter(formatter)\n logger.addHandler(console)\n\n\ndef get_histogram(image, bins):\n \"\"\" calculate a 3d RGB histogram from an image \"\"\"\n imgarray = cv2.imread(image)\n hist = cv2.calcHist([imgarray], [0, 1, 2], None,\n [bins, bins, bins],\n [0, 256, 0, 256, 0, 256])\n hist = cv2.normalize(hist, hist)\n\n return hist.flatten()\n\n\ndef chi2_distance(a, b, eps=1e-10):\n \"\"\" distance between two histograms (a, b) \"\"\"\n d = 0.5 * np.sum([((x - y) ** 2) / (x + y + eps)\n for (x, y) in zip(a, b)])\n\n return d\n\nfiles = glob.glob(\"{d}/*.jpg\".format(d=args.image_directory))\nhistograms = {}\nfor filename in files:\n (d, f) = os.path.split(filename)\n logger.info(\"calculating histogram for {f}\".format(f=filename))\n histograms[f] = get_histogram(filename, args.bins)\n\ncsv_out = csv.writer(args.outfile)\ncsv_out.writerow((\"a\", \"b\", \"distance\"))\nfor a in histograms:\n logger.info(\"calculating distances for {f}\".format(f=a))\n for b in histograms:\n if a != b:\n d = chi2_distance(histograms[a], histograms[b])\n csv_out.writerow((a, b, d))\n","repo_name":"cswingle/opencv_3d_rgb_histograms","sub_path":"rgb_histogram.py","file_name":"rgb_histogram.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"}
{"seq_id":"72147261095","text":"from django.urls import include, path\n\nfrom tools import views\n\napp_name = \"tools\"\n\nurlpatterns = [\n path(\"sslscan/\", views.SslScanList.as_view(), name=\"sslscan\"),\n path(\"sslscanlaunch/\", views.SslScanLaunch.as_view(), name=\"sslscanlaunch\"),\n path(\"sslscan_result/\", views.SslScanResult.as_view(), name=\"sslscan_result\"),\n path(\"sslcan_del/\", views.SslScanDelete.as_view(), name=\"sslcan_del\"),\n # Nikto requests\n path(\"nikto/\", views.NiktoScanList.as_view(), name=\"nikto\"),\n path(\"niktolaunch/\", views.NiktoScanLaunch.as_view(), name=\"niktolaunch\"),\n path(\"nikto_result/\", views.NiktoScanResult.as_view(), name=\"nikto_result\"),\n path(\"nikto_scan_del/\", views.NiktoScanDelete, name=\"nikto_scan_del\"),\n path(\"nikto_result_vul/\", views.NiktoResultVuln.as_view(), 
name=\"nikto_result_vul\"),\n path(\"nikto_vuln_del/\", views.NiktoVulnDelete.as_view(), name=\"nikto_vuln_del\"),\n # nmap requests\n path(\"nmap_scan/\", views.NmapScan.as_view(), name=\"nmap_scan\"),\n path(\"nmap/\", views.Nmap.as_view(), name=\"nmap\"),\n path(\"nmap_result/\", views.NmapResult.as_view(), name=\"nmap_result\"),\n path(\"nmap_scan_del/\", views.NmapScanDelete.as_view(), name=\"nmap_scan_del\"),\n # Nmap_Vulners\n path(\"nmap_vulners_scan/\", views.nmap_vulners_scan, name=\"nmap_scan\"),\n path(\"nmap_vulners/\", views.nmap_vulners, name=\"nmap_vulners\"),\n path(\"nmap_vulners_port_list/\", views.nmap_vulners_port, name=\"nmap_vulners_port\"),\n]\n","repo_name":"archerysec/archerysec","sub_path":"tools/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":2148,"dataset":"github-code","pt":"9"} +{"seq_id":"47451422","text":"'''\nQuestão #9\nPegue a lista gerada no exercício anterior e transforme cada um dos itens dessa lista em um float.\nOBS: Não é para alterar o programa anterior, mas sim a lista gerada por ele.\n'''\n\nnumero = 5\ncontador = numero + 1\nlista = []\n\nfor elemento in range(1, contador):\n lista.append(input(f\"Digite o {elemento}° numero: \"))\n\nlista_float = []\n\nfor elemento in lista:\n lista_float.append(float(elemento))\n \nprint(f\"Sua lista é: {lista_float}\")","repo_name":"angelitasantos/desenvolve40magalu-letscode","sub_path":"modulo01-logica-programacao/004-listas/aula004-exercicios009.py","file_name":"aula004-exercicios009.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"9"} +{"seq_id":"33145668721","text":"'''import hashlib\n\ninput = 'Gasdj cxzcxz cxzdsqwe'\nhashed = hashlib.sha256(input.encode('UTF-8')).hexdigest()'''\nfrom collections import defaultdict\n\ndef load_words(filename=r'C:\\Users\\79268\\Dev\\csvs\\zdf-win.txt'):\n with open(filename) as f:\n for word in f:\n yield word.rstrip()\n\ndef get_anagrams(source):\n d = defaultdict(list)\n for word in source:\n key = \"\".join(sorted(word))\n d[key].append(word)\n return d\n\ndef anagrams(word_source):\n d = get_anagrams(word_source)\n return d\n\ndef permutations(iterable, r=None):\n # permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC\n # permutations(range(3)) --> 012 021 102 120 201 210\n pool = tuple(iterable)\n n = len(pool)\n r = n if r is None else r\n if r > n:\n return\n indices = range(n)\n cycles = range(n, n-r, -1)\n yield tuple(pool[i] for i in indices[:r])\n while n:\n for i in reversed(range(r)):\n cycles[i] -= 1\n if cycles[i] == 0:\n indices[i:] = indices[i+1:] + indices[i:i+1]\n cycles[i] = n - i\n else:\n j = cycles[i]\n indices[i], indices[-j] = indices[-j], indices[i]\n yield tuple(pool[i] for i in indices[:r])\n break\n else:\n return\n\n \nword_source = load_words()\n#print(anagrams(word_source))\npermutations('слово', anagrams(word_source))\n\n\n\n","repo_name":"bitcoineazy/Study","sub_path":"семинар21.12.py","file_name":"семинар21.12.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"9"} +{"seq_id":"22576768930","text":"#\n# @lc app=leetcode.cn id=704 lang=python3\n#\n# [704] 二分查找\n#\nfrom typing import *\n\n\n# @lc code=start\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n i, j = 0, len(nums) - 1\n while (i <= j):\n # x = (i + j) // 2 # 取平均数不要使用这种写法了,看上去有溢出风险\n 
x = i + (j - i) // 2\n if nums[x] == target:\n return x\n elif nums[x] > target:\n j = x - 1\n else:\n i = x + 1\n return -1\n\n\n# @lc code=end\n\nprint(Solution().search([1], 1))\nprint(Solution().search([1, 2], 1))\n","repo_name":"HuangZhuo/litcode","sub_path":"src/704.二分查找.py","file_name":"704.二分查找.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"32714969967","text":"\n# coding: utf-8\n\n# In[9]:\n\nimport cv2\nimport numpy as np\n\n\n# In[10]:\n\ndef adjust(x):\n pass\n \n\n\n# In[11]:\n\n#Adjustment sliders for color range\n#cv2.namedWindow('red')\n#max and min red HSV values (Hue, Saturation Value)\n#For typical red objects the hue should be between 155 and 185. \n#Glowsticks will probably need some modifications and testing\n'''\nminHRed=155\nmaxHRed=185\nminSRed=0\nmaxSRed=255\nminVRed=0\nmaxVRed=255\n#default values\nlowerHRedDefault=160\nupperHRedDefault=180\n# define color ranges in HSV\nlower_red = np.array([lowerHRedDefault*-1, minSRed, minVRed])\nupper_red = np.array([upperHRedDefault, maxSRed, maxVRed]) \n\n\n# create trackbars \ncv2.createTrackbar('Hue_Upper', 'red', upper_red[0], abs(maxHRed-minHRed), adjust)\ncv2.createTrackbar('Hue_Lower', 'red', lower_red[0], abs(maxHRed-minHRed), adjust)\ncv2.createTrackbar('Sat_Upper', 'red', upper_red[1], abs(maxSRed-minSRed), adjust)\ncv2.createTrackbar('Sat_Lower', 'red', lower_red[1], abs(maxSRed-minSRed), adjust)\ncv2.createTrackbar('Val_Upper', 'red', upper_red[2], abs(maxVRed-minVRed), adjust)\ncv2.createTrackbar('Val_Lower', 'red', lower_red[2], abs(maxVRed-minVRed), adjust)\n\ncv2.namedWindow('green')\nminHGreen=40\nmaxHGreen=80\nminSGreen=10\nmaxSGreen=255\nminVGreen=10\nmaxVGreen=255\n#default values\nlowerHGreenDefault=50\nupperHGreenDefault=70\n# define color ranges in HSV\n# FIXME multiply by -1 to set initial values; this needs a better solution\nlower_green = np.array([lowerHGreenDefault*-1, minSGreen*-1, minVGreen*-1])\nupper_green = np.array([upperHGreenDefault, maxSGreen, maxVGreen]) \n\n# create trackbars \ncv2.createTrackbar('Hue_Upper', 'green', upper_green[0], abs(maxHGreen-minHGreen), adjust)\ncv2.createTrackbar('Hue_Lower', 'green', lower_green[0], abs(maxHGreen-minHGreen), adjust)\ncv2.createTrackbar('Sat_Upper', 'green', upper_green[1], abs(maxSGreen-minSGreen), adjust)\ncv2.createTrackbar('Sat_Lower', 'green', lower_green[1], abs(maxSGreen-minSGreen), adjust)\ncv2.createTrackbar('Val_Upper', 'green', upper_green[2], abs(maxVGreen-minVGreen), adjust)\ncv2.createTrackbar('Val_Lower', 'green', lower_green[2], abs(maxVGreen-minVGreen), adjust)\n'''\n\ncv2.namedWindow('testing')\nminHTesting=0\nmaxHTesting=255\nminSTesting=0\nmaxSTesting=255\nminVTesting=0\nmaxVTesting=255\n#default values\nlowerHTestingDefault=0\nupperHTestingDefault=255\n# define color ranges in HSV\n\nlower_testing = np.array([lowerHTestingDefault, minSTesting, minVTesting])\nupper_testing = np.array([upperHTestingDefault, maxSTesting, maxVTesting]) \n\n\n# create trackbars \ncv2.createTrackbar('Hue_Upper', 'testing', upper_testing[0], abs(maxHTesting-minHTesting), adjust)\ncv2.createTrackbar('Hue_Lower', 'testing', lower_testing[0], abs(maxHTesting-minHTesting), adjust)\ncv2.createTrackbar('Sat_Upper', 'testing', upper_testing[1], abs(maxSTesting-minSTesting), adjust)\ncv2.createTrackbar('Sat_Lower', 'testing', lower_testing[1], abs(maxSTesting-minSTesting), adjust)\ncv2.createTrackbar('Val_Upper', 'testing', upper_testing[2], 
abs(maxVTesting-minVTesting), adjust)\ncv2.createTrackbar('Val_Lower', 'testing', lower_testing[2], abs(maxVTesting-minVTesting), adjust)\n\n\n#minHGreen=50 \n#cv2.createTrackbar('lower_green_adjust', 'green', lower_green[0], 79, adjust)\n#cv2.createTrackbar('upper_green_adjust', 'green', upper_green[0], 79, adjust)\n\n\n# In[12]:\n\ncap = cv2.VideoCapture(0)\n\n\nwhile(1):\n \n # Take each frame\n _, frame = cap.read()\n\n # Convert BGR to HSV\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n # Threshold the selected HSV colors\n #mask_red = cv2.inRange(hsv, lower_red, upper_red)\n \n #mask_green = cv2.inRange(hsv, lower_green, upper_green)\n \n mask_testing = cv2.inRange(hsv, lower_testing, upper_testing)\n\n # Bitwise-AND mask and original image\n #ratio='Red:Green '+str(cv2.countNonZero(mask_red))+\":\"+str(cv2.countNonZero(mask_green))\n #res = cv2.bitwise_and(frame,frame, mask= mask_red)\n \n #calculate the resulting image - this may be much slower than just dealing with the masks consider reverting\n '''\n res_red=cv2.bitwise_and(frame, frame, mask=mask_red)\n res_green=cv2.bitwise_and(frame, frame, mask=mask_green)\n '''\n \n res_testing=cv2.bitwise_and(frame, frame, mask=mask_testing)\n \n # add red:green ratio to window\n # cv2.putText(frame, ratio, (10,50), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 0), 2 )\n \n # take red:green measurments above this point \n \n # all text additions need to be made AFTER measurements have been taken!\n # add adjustment slider values to window\n '''\n cv2.putText(res_red, \"Low:\"+str(lower_red), (10,50), cv2.FONT_HERSHEY_PLAIN, 2, (255,255,255), 2 )\n cv2.putText(res_red, \"Up: \"+str(upper_red), (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2) \n \n cv2.putText(res_green, \"Low:\"+str(lower_green), (10,50), cv2.FONT_HERSHEY_PLAIN, 2, (255,255,255), 2 )\n cv2.putText(res_green, \"Up: \"+str(upper_green), (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)\n '''\n \n cv2.putText(res_testing, \"Low:\"+str(lower_testing), (10,50), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)\n cv2.putText(res_testing, \"Up: \"+str(upper_testing), (10, 100), cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 2)\n\n \n \n #live image \n cv2.imshow('live',frame)\n \n '''\n cv2.imshow('red', res_red)\n \n cv2.imshow('green', res_green)\n '''\n \n cv2.imshow('testing', res_testing)\n\n #read and adjust values from sliders\n '''\n lower_red[0]=minHRed+cv2.getTrackbarPos('Hue_Lower', 'red')\n upper_red[0]=minHRed+cv2.getTrackbarPos('Hue_Upper', 'red')\n lower_red[1]=minSRed+cv2.getTrackbarPos('Sat_Lower', 'red')\n upper_red[1]=minSRed+cv2.getTrackbarPos('Sat_Upper', 'red')\n lower_red[2]=minVRed+cv2.getTrackbarPos('Val_Lower', 'red')\n upper_red[2]=minVRed+cv2.getTrackbarPos('Val_Upper', 'red')\n\n #FIXME Lower HSV are changed at this point and jump from default values\n # problem is with minHGreen+gtbposition <-\n lower_green[0]=minHGreen+cv2.getTrackbarPos('Hue_Lower', 'green')\n upper_green[0]=minHGreen+cv2.getTrackbarPos('Hue_Upper', 'green')\n lower_green[1]=minSGreen+cv2.getTrackbarPos('Sat_Lower', 'green')\n upper_green[1]=minSGreen+cv2.getTrackbarPos('Sat_Upper', 'green')\n lower_green[2]=minVGreen+cv2.getTrackbarPos('Val_Lower', 'green')\n upper_green[2]=minVGreen+cv2.getTrackbarPos('Val_Upper', 'green')\n '''\n \n #wide open range for testing\n lower_testing[0]=minHTesting+cv2.getTrackbarPos('Hue_Lower', 'testing')\n upper_testing[0]=minHTesting+cv2.getTrackbarPos('Hue_Upper', 'testing')\n lower_testing[1]=minSTesting+cv2.getTrackbarPos('Sat_Lower', 
'testing')\n upper_testing[1]=minSTesting+cv2.getTrackbarPos('Sat_Upper', 'testing')\n lower_testing[2]=minVTesting+cv2.getTrackbarPos('Val_Lower', 'testing')\n upper_testing[2]=minVTesting+cv2.getTrackbarPos('Val_Upper', 'testing')\n\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows()\ncv2.waitKey(1)\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n","repo_name":"txoof/crowd_pong","sub_path":"Depricated/live_HSV_testing.py","file_name":"live_HSV_testing.py","file_ext":"py","file_size_in_byte":7039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"74118501093","text":"from datetime import datetime\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy as sa\nimport socket\nimport warnings\nfrom urllib.error import HTTPError\n\nfrom astroquery.mast import Observations, Catalogs\nfrom astroquery.exceptions import NoResultsWarning\n\nfrom astropy.coordinates import SkyCoord\nimport astropy.io.fits as fits\n\n\n# from src.source import angle_diff\nfrom src.database import SmartSession\nfrom src.observatory import VirtualObservatory, ParsObservatory\nfrom src.dataset import RawPhotometry, Lightcurve\nfrom src.catalog import Catalog\nfrom src.source import Source\n\n\n# from src.dataset import DatasetMixin, RawPhotometry, Lightcurve\n\n\nclass ParsObsTESS(ParsObservatory):\n\n # must register this observatory in list of allowed names\n ParsObservatory.add_to_obs_names(\"TESS\")\n\n def __init__(self, **kwargs):\n\n super().__init__(\"tess\")\n\n self.preferred_catalog_mag = \"Gaia_RP\"\n\n self.distance_thresh = self.add_par(\n \"distance_thresh\",\n 10.0,\n float,\n \"Distance threshold in arcseconds for flagging \"\n \"close stars while querying TIC.\",\n )\n\n self.mag_limit = self.add_par(\n \"mag_limit\",\n 18.0,\n float,\n \"Magnitude limit for querying TIC.\",\n )\n\n self.magdiff_thresh = self.add_par(\n \"magdiff_thresh\",\n 0.75,\n float,\n \"Magnitude difference threshold for flagging \"\n \"similar stars while querying TIC.\",\n )\n\n self.query_radius = self.add_par(\n \"cat_query_radius\",\n 360,\n float,\n \"Radius in arcsec for cone search for MAST queries:\"\n \"Catalog query is to find the TESS ID for given source.\"\n \"Observations query is to find data from TIC for given source.\",\n )\n\n self.use_simple_flux = self.add_par(\n \"use_simple_flux\",\n False,\n bool,\n \"Use simple flux (SAP_FLUX) instead of calibrated flux (PDCSAP_FLUX) for TESS data.\",\n )\n\n self.use_psf_positions = self.add_par(\n \"use_psf_positions\",\n False,\n bool,\n \"Use PSF positions instead of centroids for TESS data.\",\n )\n\n self.download_pars_list = [\"distance_thresh\", \"magdiff_thresh\", \"query_radius\"]\n\n self._enforce_type_checks = True\n self._enforce_no_new_attrs = True\n\n config = self.load_then_update(kwargs)\n\n # apply parameters specific to this class\n self._apply_specific_pars(config)\n\n\nclass VirtualTESS(VirtualObservatory):\n def __init__(self, **kwargs):\n \"\"\"\n Generate an instance of a VirtualTESS object.\n This can be used to download TESS data\n and run analysis on it.\n\n Parameters\n ----------\n Are the same as the VirtualObservatory class.\n The only difference is that the obs_name is set to \"tess\".\n \"\"\"\n\n self.pars = self._make_pars_object(kwargs)\n super().__init__(name=\"tess\")\n\n @staticmethod\n def _make_pars_object(kwargs):\n \"\"\"\n Make the ParsObsTESS object.\n When writing a subclass of this class\n that has its own subclassed Parameters,\n 
this function will allow the constructor\n of the superclass to instantiate the correct\n subclass Parameters object.\n \"\"\"\n return ParsObsTESS(**kwargs)\n\n def reduce_photometry(\n self,\n dataset,\n source=None,\n init_kwargs={},\n **_,\n ):\n \"\"\"\n Reduce the raw photometry to usable lightcurves.\n The data is all from a single filter, but it should\n still be split up into sectors.\n\n Parameters\n ----------\n dataset: a src.dataset.RawPhotometry object\n The raw data to reduce.\n source: src.source.Source object\n The source to which the dataset belongs.\n If None, the reduction will not use any\n data of the source, such as the expected\n magnitude, the position, etc.\n init_kwargs: dict\n A dictionary of keyword arguments to be\n passed to the constructor of the new dataset.\n # TODO: need to add more parameters for advanced detrending\n\n Returns\n -------\n a list of src.dataset.Lightcurve objects\n The reduced datasets, after minimal processing.\n The reduced datasets will have uniform filter,\n each dataset will be sorted by time,\n and some initial processing will be done,\n using the \"reduce_kwargs\" parameter (or function inputs).\n \"\"\"\n self._check_dataset(\n dataset, DataClass=RawPhotometry, allowed_dataclasses=[pd.DataFrame]\n )\n\n # get the altdata from the init_kwargs (if it is there)\n altdata_base = init_kwargs.pop(\"altdata\", dataset.altdata)\n\n if \"zero_point_instrumental\" not in init_kwargs:\n # from the TESS instrument handbook section 7.1\n # https://archive.stsci.edu/files/live/sites/mast/files/home/missions-and-data/active-missions/tess/_documents/TESS_Instrument_Handbook_v0.1.pdf\n init_kwargs[\"zero_point_instrumental\"] = 20.44\n\n # split the dataframe into sectors\n if len(dataset.data) == 0:\n return []\n dfs = dataset.data.groupby(\"SECTOR\")\n sectors = [df[0] for df in dfs]\n new_datasets = []\n for df_tuple in dfs:\n sector = df_tuple[0]\n df = df_tuple[1]\n new_altdata = altdata_base.copy()\n new_altdata[\"sectors\"] = sector\n new_altdata[\"filter\"] = \"TESS\"\n\n idx = None\n for i in range(len(sectors)):\n if int(altdata_base[\"file_headers\"][i][\"SECTOR\"]) == int(sector):\n idx = i\n break\n if idx is None:\n raise ValueError(\"Could not find sector in altdata.\")\n new_altdata[\"file_headers\"] = [altdata_base[\"file_headers\"][idx]]\n new_altdata[\"lightcurve_headers\"] = [\n altdata_base[\"lightcurve_headers\"][idx]\n ]\n new_altdata[\"aperture_arrays\"] = [altdata_base[\"aperture_arrays\"][idx]]\n new_altdata[\"aperture_headers\"] = [altdata_base[\"aperture_headers\"][idx]]\n new_altdata[\"ra\"] = float(altdata_base[\"file_headers\"][idx][\"RA_OBJ\"])\n new_altdata[\"dec\"] = float(altdata_base[\"file_headers\"][idx][\"DEC_OBJ\"])\n sector = altdata_base[\"file_headers\"][idx][\"SECTOR\"]\n camera = altdata_base[\"file_headers\"][idx][\"CAMERA\"]\n ccd = altdata_base[\"file_headers\"][idx][\"CCD\"]\n new_altdata[\"series_name\"] = f\"TESS_{sector}_{camera}_{ccd}\"\n new_altdata[\"object_id\"] = str(altdata_base[\"file_headers\"][idx][\"TICID\"])\n new_altdata[\"time_stamp_alignment\"] = {\n 0.0: \"start\",\n 0.5: \"middle\",\n 1.0: \"end\",\n }.get(float(altdata_base[\"lightcurve_headers\"][idx][\"TIMEPIXR\"]), 0.5)\n if len(df) > 0:\n lc = Lightcurve(data=df, altdata=new_altdata, **init_kwargs)\n new_datasets.append(lc)\n\n # calculate the measured zero point\n tessmag = new_altdata[\"file_headers\"][0].get(\"TESSMAG\")\n if tessmag is not None:\n zp = tessmag + 2.5 * np.log10(lc.data[\"flux\"].median())\n 
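# The measured zero point follows from inverting m = ZP - 2.5*log10(flux)
# at the catalog TESS magnitude: ZP = m + 2.5*log10(flux), evaluated at the
# median flux as a robust typical value. For example, TESSMAG = 12.0 with a
# median flux of 1000 would give ZP = 12.0 + 2.5*log10(1000) = 19.5
# (illustrative numbers only).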
lc.zero_point_measured = zp\n\n # TODO: add more processing here (e.g., detrending)\n\n return new_datasets\n\n def get_colmap_time_info(self, data=None, altdata=None):\n \"\"\"\n Update the column map of the dataset.\n This parses the time and flux columns\n correctly, including the specific time offset\n of this mission and the preferred flux type.\n\n Parameters\n ----------\n data: pandas.DataFrame\n The raw data to be parsed. Sometimes the raw data\n contains information about the columns or the time format.\n altdata: dict\n The altdata dictionary to be updated.\n Sometimes the altdata contains info like the time offset.\n\n Returns\n -------\n colmap: dict (optional)\n A dictionary mapping the column names in the raw dataset\n to the standardized names in the raw dataset.\n time_info: dict (optional)\n A dictionary with information about the time column in the raw dataset.\n \"\"\"\n colmap = {}\n time_info = {}\n\n time_info[\"offset\"] = 2457000.0\n # get this from the altdata\n if altdata is not None and len(altdata.get(\"lightcurve_headers\", [])) > 0:\n integer_offset = altdata[\"lightcurve_headers\"][0][\"BJDREFI\"]\n fractional_offset = altdata[\"lightcurve_headers\"][0][\"BJDREFF\"]\n time_info[\"offset\"] = integer_offset + fractional_offset\n time_info[\"format\"] = \"jd\"\n colmap[\"time\"] = \"TIME\"\n\n colmap[\"flux\"] = \"PDCSAP_FLUX\"\n colmap[\"fluxerr\"] = \"PDCSAP_FLUX_ERR\"\n if self.pars.use_simple_flux:\n colmap[\"flux\"] = \"SAP_FLUX\"\n colmap[\"fluxerr\"] = \"SAP_FLUX_ERR\"\n\n colmap[\"time_corr\"] = \"TIMECORR\"\n colmap[\"bg\"] = \"SAP_BKG\"\n colmap[\"bg_err\"] = \"SAP_BKG_ERR\"\n\n colmap[\"pos1\"] = \"MOM_CENTR1\"\n colmap[\"pos1_err\"] = \"MOM_CENTR1_ERR\"\n colmap[\"pos2\"] = \"MOM_CENTR2\"\n colmap[\"pos2_err\"] = \"MOM_CENTR2_ERR\"\n\n if self.pars.use_psf_positions:\n colmap[\"pos1\"] = \"PSF_CENTR1\"\n colmap[\"pos1_err\"] = \"PSF_CENTR1_ERR\"\n colmap[\"pos2\"] = \"PSF_CENTR2\"\n colmap[\"pos2_err\"] = \"PSF_CENTR2_ERR\"\n\n colmap[\"pos_corr1\"] = \"POS_CORR1\"\n colmap[\"pos_corr2\"] = \"POS_CORR2\"\n\n return colmap, time_info\n\n def _append_local_name(self, source):\n \"\"\"\n Append to the local_names of the source.\n In this case the alias is the TIC ID.\n \"\"\"\n if self.name.upper() not in source.local_names:\n raw_data = None\n for dt in self.pars.data_types:\n for rd in getattr(source, f\"raw_{dt}\"):\n if rd.observatory == self.name:\n raw_data = rd\n break\n\n if raw_data is not None and \"file_headers\" in raw_data.altdata:\n source.local_names[self.name.upper()] = raw_data.altdata[\n \"file_headers\"\n ][0][\"TICID\"]\n\n @staticmethod\n def _get_exposure_time(altdata):\n \"\"\"\n Get the exposure time of the observations\n from the altdata.\n ref: page 20 of https://archive.stsci.edu/files/live/sites/mast/files/home/missions-and-data/active-missions/tess/_documents/EXP-TESS-ARC-ICD-TM-0014-Rev-F.pdf\n Returns\n -------\n exp_time: float\n The exposure time of the observations.\n \"\"\"\n\n if \"lightcurve_headers\" in altdata and len(altdata[\"lightcurve_headers\"]) > 0:\n header = altdata[\"lightcurve_headers\"][0]\n frame_time = header[\"INT_TIME\"]\n else:\n frame_time = 0.98 * 2.0\n\n if \"file_headers\" in altdata and len(altdata[\"file_headers\"]) > 0:\n header = altdata[\"file_headers\"][0]\n num_frames = header[\"CRBLKSZ\"] # this many frames per block\n # brightest and dimmest frames are removed to avoid cosmic rays\n if header[\"CRMITEN\"] or header[\"CRSPOC\"]:\n num_frames -= 2\n\n # native exposure time for each 
TESS image is 2.0s minus 0.04s dead time\n exp_time = frame_time * num_frames\n else:\n exp_time = None\n\n altdata[\"EXP_TIME\"] = exp_time\n\n return exp_time\n\n def fetch_by_ticid(\n self,\n ticid,\n download=True,\n use_catalog=True,\n session=None,\n filter_args=None,\n download_args={},\n dataset_args={},\n ):\n \"\"\"\n Get a source given a TIC number.\n The observatory will first search the database for\n sources where the local_names have 'TESS': .\n Will try to look for sources inside this same project name\n and with the same cfg_hash. If none are found to have raw photometry,\n sources from other projects are searched as well.\n If not found, will download the data and create a new source\n and a new raw photometry entry.\n\n Parameters\n ----------\n ticid: str or int\n TIC number of the object to download data for.\n download: bool\n If True, will download the data if it is not found.\n When downloading data, a new RawPhotometry object is\n created if it doesn't exist, and a new Source object\n is created unless it already existed, with the same\n project name and cfg_hash. Default is True.\n use_catalog: bool\n If True, and only if a catalog object is given to\n the observatory, if a new source is created,\n an attempt will be made to match the source to\n a catalog entry, using the radius and magnitude\n to match any existing sources. Default is True.\n session: sqlalchemy session\n If given, will use this session to query the database.\n If not given, will create a new session and close it\n at the end of the function.\n To avoid any database interactions, set session=False.\n filter_args: list\n List of additional constraints on the database search\n for sources, e.g., Source.test_hash.is_(None).\n download_args: dict\n Dictionary of arguments to pass to the download function.\n dataset_args: dict\n Additional keyword arguments to pass to the\n constructor of raw data objects.\n\n Returns\n -------\n source: Source\n The source object that was downloaded\n or found from the database. 
If no source\n was found, returns None.\n \"\"\"\n if self.project is None:\n raise ValueError(\"No project given to observatory.\")\n\n if filter_args is None:\n filter_args = []\n\n source = None\n ticid = str(ticid)\n\n with SmartSession(session) as session:\n # first, check if the source is already in the database\n sources = session.scalars(\n sa.select(Source).where(\n Source.local_names[\"TESS\"].astext == ticid,\n *filter_args,\n )\n ).all()\n sources.sort(\n key=lambda x: x.created_at if x.created_at else datetime.min,\n reverse=True,\n )\n\n # only sources inside this project\n project_sources = [s for s in sources if s.project == self.project]\n\n # only sources inside this project (and with same version control)\n project_vc_sources = [\n s for s in project_sources if s.cfg_hash == self.cfg_hash\n ]\n\n if len(project_vc_sources) > 0:\n source = Source.find_source_with_raw_data(\n project_vc_sources,\n obs=self.name,\n session=session,\n check_data=self.pars.check_data_exists,\n )\n\n if source is None and len(project_sources) > 0:\n source = Source.find_source_with_raw_data(\n project_sources,\n obs=self.name,\n session=session,\n check_data=self.pars.check_data_exists,\n )\n\n if source is None and len(sources) > 0:\n source = Source.find_source_with_raw_data(\n sources,\n obs=self.name,\n session=session,\n check_data=self.pars.check_data_exists,\n )\n\n if source is None:\n # try to find a RawPhotometry object without a source:\n raw_data = session.scalars(\n sa.select(RawPhotometry).where(\n RawPhotometry.altdata[\"TICID\"].astext == ticid,\n )\n ).all()\n if len(raw_data) > 0:\n raw_data.sort(\n key=lambda x: x.created_at if x.created_at else datetime.min,\n reverse=True,\n )\n raw_data = raw_data[0]\n altdata = raw_data.altdata\n else:\n raw_data = None\n altdata = None\n\n # couldn't find a source or raw data, download it\n if raw_data is None and download:\n data, altdata = self._download_lightcurves_from_mast_by_ticid(ticid)\n\n if \"file_headers\" not in altdata or len(altdata[\"file_headers\"]) == 0:\n raise ValueError(\"Cannot find file_headers in altdata! \")\n\n # this happens if download=False and no raw photometry was found\n if altdata is None:\n return None\n\n mag = altdata[\"file_headers\"][0][\"TESSMAG\"]\n ra = altdata[\"file_headers\"][0][\"RA_OBJ\"]\n dec = altdata[\"file_headers\"][0][\"DEC_OBJ\"]\n pm_ra = altdata[\"file_headers\"][0][\"PMRA\"]\n pm_dec = altdata[\"file_headers\"][0][\"PMDEC\"]\n\n source_name = ticid # try to find a better name below\n\n # match the found source to the correct name in the catalog\n if use_catalog and self.catalog is not None:\n cat_row = self.catalog.get_nearest_row(\n ra, dec, radius=self.pars.query_radius, output=\"dict\"\n )\n # TODO: I can't think of a better thing to do in this case... 
maybe just use the TIC name?\n if cat_row is None:\n raise ValueError(\n f\"No catalog entry found within radius {self.pars.query_radius} arcsec!\"\n )\n source_name = cat_row[\"name\"] # update with catalog name!\n else:\n cat_row = dict(\n mag=mag,\n name=source_name,\n ra=ra,\n dec=dec,\n pmra=pm_ra,\n pmdec=pm_dec,\n )\n\n source = Source(**cat_row, project=self.project, cfg_hash=self.cfg_hash)\n source.cat_row = cat_row # save the raw catalog row as well\n\n if raw_data is None:\n raw_data = source.get_data(\n obs=self.name,\n data_type=\"photometry\",\n level=\"raw\",\n session=session,\n check_data=self.pars.check_data_exists,\n )\n raw_data = raw_data[0] if len(raw_data) > 0 else None\n\n if raw_data is None:\n altdata[\"cat_row\"] = cat_row\n\n # save the parameters involved with the download\n download_pars = {\n k: self.pars[k] for k in self.pars.download_pars_list\n }\n download_pars.update(\n {\n k: download_args[k]\n for k in self.pars.download_pars_list\n if k in download_args\n }\n )\n altdata[\"download_pars\"] = download_pars\n colmap, time_info = self.get_colmap_time_info(data, altdata)\n\n dataset_args[\"colmap\"] = colmap\n dataset_args[\"time_info\"] = time_info\n raw_data = RawPhotometry(\n data=data,\n altdata=altdata,\n observatory=self.name,\n source_name=source_name,\n **dataset_args,\n )\n\n source.raw_photometry.append(raw_data)\n\n return source\n\n def download_from_observatory(\n self,\n cat_row,\n **_,\n ):\n \"\"\"\n Fetch data from TESS for a given source.\n Returns a dataframe including all TESS observations of given source.\n Returns empty dataframe if:\n - source magnitude is too high (too faint for TESS)\n - source doesn't exist in TESS catalog\n - TESS has no data for source\n\n Parameters\n ----------\n cat_row: dict like\n A row in the catalog for a specific source.\n In general, this row should contain the following keys:\n name, ra, dec, mag, filter_name (saying which band the mag is in).\n\n Returns\n -------\n data : pandas.DataFrame or other data structure\n Raw data from the observatory, to be put into a RawPhotometry object.\n altdata: dict\n Additional data to be stored in the RawPhotometry object.\n\n \"\"\"\n\n # name should be source_id for GAIA catalog entry\n name = cat_row[\"name\"]\n dec = cat_row[\"dec\"]\n ra = cat_row[\"ra\"]\n mag = cat_row[\"mag\"]\n\n # TESS can't see stars fainter than this\n if mag > self.pars.mag_limit:\n self.pars.vprint(f\"Magnitude of {mag} is too faint for TESS.\")\n return pd.DataFrame(), {}\n\n cat_params = {\n \"coordinates\": SkyCoord(ra, dec, frame=\"icrs\", unit=\"deg\"),\n \"catalog\": \"TIC\",\n \"radius\": self.pars.query_radius / 3600,\n }\n catalog_data = self._try_query(Catalogs.query_region, cat_params)\n if len(catalog_data) == 0:\n self.pars.vprint(\"No TESS object found for given catalog row.\")\n return pd.DataFrame(), {}\n\n candidate_idx = None\n for i in range(len(catalog_data)):\n # catalog is sorted by distance\n # -> iterating from least to greatest distance\n m = catalog_data[\"GAIAmag\"][i]\n d = catalog_data[\"dstArcSec\"][i]\n if (\n ~np.isnan(m)\n and abs(m - mag) < self.pars.magdiff_thresh\n and ~np.isnan(d)\n and d < self.pars.distance_thresh\n ):\n candidate_idx = i\n # grab the first candidate within dist and magdiff threshold\n break\n\n if candidate_idx is None:\n self.pars.vprint(\n \"No objects found within mag difference threshold for TIC query.\",\n )\n return pd.DataFrame(), {}\n\n ticid = catalog_data[\"ID\"][candidate_idx]\n data, altdata = 
self._download_lightcurves_from_mast_by_ticid(ticid)\n\n return data, altdata\n\n def _download_lightcurves_from_mast_by_ticid(self, ticid):\n \"\"\"\n Download the data from MAST database using the given TIC ID.\n\n Parameters\n ----------\n ticid: str or int\n The TIC ID of the object to download data for.\n\n Returns\n -------\n data: pandas.DataFrame\n The lightcurve data.\n altdata: dict\n Additional data to be stored in the RawPhotometry object.\n \"\"\"\n ticid = str(ticid)\n\n tess_name = \"TIC \" + ticid\n obs_params = {\n \"objectname\": tess_name,\n \"radius\": self.pars.query_radius / 3600,\n \"obs_collection\": \"TESS\",\n \"dataproduct_type\": \"timeseries\",\n }\n data_query = self._try_query(Observations.query_criteria, obs_params)\n\n if len(data_query) == 0:\n self.pars.vprint(f\"No data found for object {tess_name}.\")\n return pd.DataFrame(), {}\n if ticid not in data_query[\"target_name\"]:\n self.pars.vprint(f\"No timeseries data found for object {tess_name}.\")\n return pd.DataFrame(), {}\n\n lc_indices = []\n for i in range(len(data_query)):\n uri = data_query[\"dataURL\"][i]\n id = data_query[\"target_name\"][i]\n if isinstance(uri, str) and uri[-7:-5] == \"lc\" and id == ticid:\n lc_indices.append(i)\n\n if not lc_indices:\n self.pars.vprint(f\"No lightcurve data found for object {tess_name}.\")\n return pd.DataFrame(), {}\n\n self.pars.vprint(f\"Found {len(lc_indices)} light curve(s) for this source.\")\n\n base_url = \"https://mast.stsci.edu/api/v0.1/Download/file?uri=\"\n\n sectors = []\n df_list = []\n file_header_list = []\n lc_header_list = []\n aperture_list = []\n ap_header_list = []\n\n for i in lc_indices:\n uri = data_query[\"dataURL\"][i]\n (\n file_header,\n lightcurve_data,\n lightcurve_header,\n aperture_array,\n aperture_header,\n ) = self._try_open_fits(base_url + uri)\n\n sectors.append(file_header[\"SECTOR\"])\n lightcurve_data[\"SECTOR\"] = file_header[\"SECTOR\"]\n # get the exposure time from the header\n lightcurve_data[\"EXPTIME\"] = lightcurve_header[\"EXPOSURE\"]\n df_list.append(lightcurve_data)\n\n file_header_list.append(file_header)\n lc_header_list.append(lightcurve_header)\n\n # convert the aperture matrix into a nested list\n aperture_list.append(aperture_array.tolist())\n ap_header_list.append(aperture_header)\n\n # go over the dataframes and find the ones with the most exposures per sector:\n new_file_header_list = []\n new_lc_header_list = []\n new_aperture_list = []\n new_ap_header_list = []\n new_df_list = []\n unique_sectors = list(set(sectors))\n unique_sectors.sort()\n for s in unique_sectors:\n best_len = 0\n best_idx = None\n for i, (df, h) in enumerate(zip(df_list, file_header_list)):\n if h[\"SECTOR\"] == s and len(df) > best_len:\n best_len = len(df)\n best_idx = i\n\n new_df_list.append(df_list[best_idx])\n new_file_header_list.append(file_header_list[best_idx])\n new_lc_header_list.append(lc_header_list[best_idx])\n new_aperture_list.append(aperture_list[best_idx])\n new_ap_header_list.append(ap_header_list[best_idx])\n\n altdata = dict(TICID=int(ticid), filter=\"TESS\")\n\n altdata[\"SECTORS\"] = unique_sectors\n altdata[\"file_headers\"] = new_file_header_list\n altdata[\"lightcurve_headers\"] = new_lc_header_list\n altdata[\"aperture_arrays\"] = new_aperture_list\n altdata[\"aperture_headers\"] = new_ap_header_list\n self._get_exposure_time(altdata)\n\n data = pd.concat(new_df_list, ignore_index=True)\n\n altdata[\"sectors\"] = list(sectors)\n\n return data, altdata\n\n def _try_query(self, query_fn, params):\n 
\"\"\"\n Makes an astroquery request repeatedly, ignoring any timeout errors.\n Returns first successful response, otherwise raises TimeoutError.\n \"\"\"\n # maybe try using multiprocessing to terminate after 10 secs?\n for tries in range(10):\n try:\n self.pars.vprint(f\"Making query request, attempt {tries + 1}/10 ...\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", NoResultsWarning)\n ret = query_fn(**params)\n return ret\n except TimeoutError as e:\n self.pars.vprint(f\"Request timed out.\")\n\n raise TimeoutError(f\"Too many timeouts from query request.\")\n\n def _try_open_fits(self, url):\n \"\"\"\n Tries to open fits file repeatedly, ignoring any timeout errors.\n Returns first successful response, otherwise raises TimeoutError.\n\n Returns\n -------\n file_header: dict\n Header of the FITS file.\n lightcurve_data: np.ndarray\n Data from the first extension,\n including the lightcurve for this file.\n lightcurve_header: dict\n Header of the first extension,\n including metadata for the lightcurve.\n aperture_array: nested 2D list\n Data from the second extension,\n including an aperture mask for this source.\n aperture_header: dict\n Header of the second extension,\n with some metadata about the aperture.\n \"\"\"\n file_header_exclusions = [\n \"SIMPLE\",\n \"BITPIX\",\n \"NAXIS\",\n \"EXTEND\",\n \"NEXTEND\",\n \"EXTNAME\",\n \"EXTVER\",\n \"ORIGIN\",\n \"TELESCO\",\n \"INSTRUME\",\n ]\n extention_header_exclusions = [\n \"XTENSION\",\n \"BITPIX\",\n \"NAXIS\",\n \"NAXIS1\",\n \"NAXIS2\",\n \"PCOUNT\",\n \"GCOUNT\",\n ]\n\n for _ in range(10):\n try:\n # TODO: can we store some of the extra info from FITS\n # e.g., the units on the data columns?\n with fits.open(url, cache=False) as hdul:\n file_header = dict(hdul[0].header)\n for key in file_header_exclusions:\n if key in file_header:\n del file_header[key]\n\n lightcurve_data = pd.DataFrame(hdul[1].data)\n lightcurve_header = dict(hdul[1].header)\n aperture_array = hdul[2].data\n aperture_header = dict(hdul[2].header)\n\n for key in extention_header_exclusions:\n if key in lightcurve_header:\n del lightcurve_header[key]\n if key in aperture_header:\n del aperture_header[key]\n\n # rename the TIME column of the lightcurve\n # this will help make sure we know the units and offset from JD\n # lightcurve_data.rename(columns={\"TIME\": lightcurve_header['TUNIT1']}, inplace=True)\n return (\n file_header,\n lightcurve_data,\n lightcurve_header,\n aperture_array,\n aperture_header,\n )\n except socket.timeout:\n continue\n except HTTPError:\n # This printout can be removed after we figure out\n # if these errors are common or not.\n print(\"Encountered an HTTPError. 
Retrying...\")\n continue\n\n raise TimeoutError(f\"Too many timeouts from trying to open fits.\")\n\n\nif __name__ == \"__main__\":\n import src.database\n\n src.database.DATA_ROOT = \"data\"\n tess = VirtualTESS(project=\"tess_wds\", verbose=10)\n\n tess.catalog = Catalog(default=\"wd\")\n tess.catalog.load()\n\n tess.fetch_all_sources(save=True, reduce=True)\n #\n # print(\"finished loading catalog\")\n #\n # count = 0\n # for i in range(len(white_dwarfs.data)):\n # if i > 20:\n # break\n #\n # cat_row = white_dwarfs.get_row(i, output=\"dict\")\n # if cat_row[\"mag\"] > 16:\n # continue\n #\n # print(f\"index={i}, cat_row: {cat_row}\")\n # tess.fetch_source(cat_row, reduce=True, save=1)\n\n # result = tess.download_from_observatory(cat_row, verbose=1)\n # if not result[1]: # failed fetch returns empty dict\n # continue\n #\n #\n #\n # lc_data, altdata = result\n # print(\n # f\"TICID = {altdata['TICID']}, GAIA mag = {cat_row['mag']}, TESS mag = {altdata['TESSMAG']}\"\n # )\n # count += 1\n #\n # ticid = altdata[\"TICID\"]\n # print(\"saving to disk...\")\n # lc_data.to_hdf(\n # \"/Users/felix_3gpdyfd/astro_research/virtualobserver\"\n # f\"/notebook/tess_data_TEST/tess_lc_{ticid}.h5\",\n # key=\"df\",\n # )\n\n # print(f\"\\nFinal Count: {count}\")\n # print(tess.latest_source.raw_photometry[0].loaded_status)\n","repo_name":"guynir42/AstroRetriever","sub_path":"src/tess.py","file_name":"tess.py","file_ext":"py","file_size_in_byte":32577,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"9"} +{"seq_id":"19731814169","text":"# !/usr/bin/env python\n# encoding: utf-8\n\nimport requests\nimport json\nimport re\n\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.db.models import Q\nfrom numbers import Number\n\n\nclass MapQuestAPIKeyError(Exception):\n \"\"\"Your MapQuest API Key is either invalid or at its limit.\"\"\"\n pass\n\n\ndef long_lat_wkt(state):\n \"\"\"\n This translates GIS data saved as binary (WKB) into a text string (WKT).\n 4326 refers to the commonly used spatial reference system and is used\n for the GIS fields on the PropertyState and TaxLotState models.\n \"\"\"\n if state.long_lat:\n return GEOSGeometry(state.long_lat, srid=4326).wkt\n\n\ndef bounding_box_wkt(state):\n \"\"\"\n This translates GIS data saved as binary (WKB) into a text string (WKT).\n \"\"\"\n if state.bounding_box:\n return GEOSGeometry(state.bounding_box, srid=4326).wkt\n\n\ndef geocode_buildings(buildings):\n \"\"\"\n Expects either a QuerySet (QS) of PropertyStates or a QS TaxLotStates.\n\n Previous manually geocoded -States (not via API) are handled then\n separated first. Everything else is eligible for geocoding (even those\n successfully geocoded before).\n\n With these remaining -States, build a dictionary of {id: address} and\n a dictionary of {address: geocoding_results}. It uses those two to construct\n a dictionary of {id: geocoding_results}. 
Finally, the\n {id: geocoding_results} dictionary is used to update the QS objects.\n\n Depending on whether and how a -State is geocoded, the geocoding_confidence is\n populated with details such as the confidence quality or lack thereof.\n \"\"\"\n # -States with longitude and latitude prepopulated while excluding those previously geocoded by API\n pregeocoded = buildings.filter(longitude__isnull=False, latitude__isnull=False).exclude(geocoding_confidence__startswith=\"High\")\n _geocode_by_prepopulated_fields(pregeocoded)\n\n # Include ungeocoded -States as well as previously API geocoded -States.\n buildings_to_geocode = buildings.filter(Q(longitude__isnull=True, latitude__isnull=True) | Q(geocoding_confidence__startswith=\"High\"))\n\n # Don't continue if there are no buildings remaining\n if not buildings_to_geocode:\n return\n\n org = buildings_to_geocode[0].organization\n mapquest_api_key = org.mapquest_api_key\n\n # Don't continue if the mapquest_api_key for this org is ''\n if not mapquest_api_key:\n return\n\n # Don't continue if geocoding is disabled on this org\n if not org.geocoding_enabled:\n return\n\n id_addresses = _id_addresses(buildings_to_geocode, org)\n\n # Don't continue if there are no addresses to geocode, indicating an insufficient\n # number of geocoding columns for all individual buildings or the whole org\n if not id_addresses:\n return\n\n address_geocoding_results = _address_geocoding_results(id_addresses, mapquest_api_key)\n\n id_geocoding_results = _id_geocodings(id_addresses, address_geocoding_results)\n\n _save_geocoding_results(id_geocoding_results, buildings_to_geocode)\n\n\ndef _save_geocoding_results(id_geocoding_results, buildings_to_geocode):\n for id, geocoding_result in id_geocoding_results.items():\n building = buildings_to_geocode.get(pk=id)\n\n if geocoding_result.get(\"is_valid\"):\n building.long_lat = geocoding_result.get(\"long_lat\")\n building.geocoding_confidence = f\"High ({geocoding_result.get('quality')})\"\n\n building.longitude = geocoding_result.get(\"longitude\")\n building.latitude = geocoding_result.get(\"latitude\")\n else:\n building.geocoding_confidence = f\"Low - check address ({geocoding_result.get('quality')})\"\n\n building.save()\n\n\ndef _geocode_by_prepopulated_fields(buildings):\n for building in buildings.iterator():\n long_lat = f\"POINT ({building.longitude} {building.latitude})\"\n building.long_lat = long_lat\n building.geocoding_confidence = \"Manually geocoded (N/A)\"\n building.save()\n\n\ndef _id_addresses(buildings, org):\n \"\"\"\n Return a dictionary with {id: address, ...} containing only addresses with\n enough components.\n\n Expects all buildings to be of the same type - either PropertyState or TaxLotState\n\n For any addresses that don't have enough components,\n specify this in `geocoding_confidence`.\n \"\"\"\n geocoding_columns = org.column_set.filter(\n geocoding_order__gt=0,\n table_name=buildings[0].__class__.__name__\n ).order_by('geocoding_order').values('column_name', 'is_extra_data')\n\n if geocoding_columns.count() == 0:\n return {}\n\n id_addresses = {}\n\n for building in buildings.iterator():\n full_address = _full_address(building, geocoding_columns)\n if full_address is not None:\n id_addresses[building.id] = full_address\n else:\n building.geocoding_confidence = \"Missing address components (N/A)\"\n building.save()\n\n return id_addresses\n\n\ndef _full_address(building, geocoding_columns):\n \"\"\"\n Using organization-specific geocoding columns, a full address string is built.\n\n
 Check that at least one address component is present. Combine the components into\n one full address. This helps to avoid receiving MapQuest's best-guess result.\n For example, only sending '3001 Brighton Blvd, Suite 2693' would yield a\n valid point from one of multiple cities.\n\n Before passing the address back, special and reserved characters are removed.\n \"\"\"\n\n address_components = []\n for col in geocoding_columns:\n if col['is_extra_data']:\n address_value = building.extra_data.get(col['column_name'], None)\n else:\n address_value = getattr(building, col['column_name'])\n\n # Only accept non-empty strings or numbers\n if (isinstance(address_value, (str, Number))) and (address_value != \"\"):\n address_components.append(str(address_value))\n\n if len(address_components) > 0:\n full_address = \", \".join(address_components)\n return re.sub(r'[;/?:@=&\"<>#%{}|[\"^~`\\]\\\\]', '', full_address)\n else:\n return None\n\n\ndef _address_geocoding_results(id_addresses, mapquest_api_key):\n addresses = list(set(id_addresses.values()))\n\n batched_addresses = _batch_addresses(addresses)\n results = []\n\n for batch in batched_addresses:\n locations = {\"locations\": []}\n locations[\"locations\"] = [{\"street\": address} for address in batch]\n locations_json = json.dumps(locations)\n\n request_url = (\n 'https://www.mapquestapi.com/geocoding/v1/batch?' +\n '&inFormat=json&outFormat=json&thumbMaps=false&maxResults=2' +\n '&json=' + locations_json +\n '&key=' + mapquest_api_key\n )\n\n response = requests.get(request_url)\n try:\n results += response.json().get('results')\n except Exception as e:\n if response.status_code == 403:\n raise MapQuestAPIKeyError('Failed geocoding property states due to MapQuest error. Your MapQuest API Key is either invalid or at its limit.')\n else:\n raise e\n\n return {_response_address(result): _analyze_location(result) for result in results}\n\n\ndef _response_address(result):\n return result.get('providedLocation').get('street')\n\n\ndef _analyze_location(result):\n \"\"\"\n If multiple geolocations are returned, pass invalid indicator of \"Ambiguous\".\n\n According to MapQuest API\n - https://developer.mapquest.com/documentation/geocoding-api/quality-codes/\n GeoCode Quality ratings are provided in 5 characters in the form 'ZZYYY'.\n 'ZZ' describes granularity level, and 'YYY' describes confidence ratings.\n\n Accuracy to either a point or a street address is accepted, while confidence\n ratings must all be at least A's and B's without C's or X's (N/A).\n \"\"\"\n if len(result.get('locations')) != 1:\n return {\"quality\": \"Ambiguous\"}\n\n quality = result.get('locations')[0].get('geocodeQualityCode')\n granularity_level = quality[0:2]\n confidence_level = quality[2:5]\n is_acceptable_granularity = granularity_level in [\"P1\", \"L1\"]\n is_acceptable_confidence = not (\"C\" in confidence_level or \"X\" in confidence_level)\n\n if is_acceptable_confidence and is_acceptable_granularity:\n long = result.get('locations')[0].get('displayLatLng').get('lng')\n lat = result.get('locations')[0].get('displayLatLng').get('lat')\n\n return {\n \"is_valid\": True,\n \"long_lat\": f\"POINT ({long} {lat})\",\n \"quality\": quality,\n \"longitude\": long,\n \"latitude\": lat\n }\n else:\n return {\"quality\": quality}\n\n\ndef _id_geocodings(id_addresses, address_geocoding_results):\n return {\n id: address_geocoding_results.get(address)\n for id, address\n in id_addresses.items()\n if address_geocoding_results.get(address) is not None\n }\n\n\ndef 
_batch_addresses(addresses, n=50):\n # yield successive batches of up to n addresses; slicing never raises\n # StopIteration, so no try/except is needed here\n for i in range(0, len(addresses), n):\n yield addresses[i:i + n]\n","repo_name":"ClearlyEnergy/seed-python3","sub_path":"seed/utils/geocode.py","file_name":"geocode.py","file_ext":"py","file_size_in_byte":9151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"21130587266","text":"from ml.logger import log\nfrom ml.models.base import BaseTrainModel, BaseInferenceModel\nfrom ml.file_utils import _find_cls_using_name\n\n\ndef create_train_model(opt, train_loader, test_loader, name):\n\n log(f'Finding train model with name: [{name}] ... ')\n\n cls = _find_cls_using_name(\n name,\n package='models',\n parent_class=BaseTrainModel,\n cls_postfix='TrainModel'\n )\n\n instance = cls(opt, train_loader, test_loader)\n\n log(f'done: [{instance.__class__.__name__}] was created')\n return instance\n\n\ndef create_inference_model(opt, inference_loader, name):\n\n log(f'Finding inference model with name: [{name}] ... ')\n\n cls = _find_cls_using_name(\n name,\n package='models',\n parent_class=BaseInferenceModel,\n cls_postfix='InferenceModel'\n )\n\n instance = cls(opt, inference_loader)\n\n log(f'done: [{instance.__class__.__name__}] was created')\n return instance\n","repo_name":"weilueluo/ucl-master-project","sub_path":"ml/models/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"39006850379","text":"# coding: utf-8\n\nimport serial\nimport sys\nimport os\nfrom PIL import Image\nfrom datetime import datetime\n\nif len(sys.argv) < 4:\n print('[Usage]: {} <serial_port> <n_cols> <n_rows>'.format(sys.argv[0]))\n exit()\n\ntry:\n nc = int(sys.argv[2], 10) # number of columns\n nr = int(sys.argv[3], 10) # number of rows\n\n baudrate = 115200\n timeout = int((nc * nr / (baudrate / 8) + 5) * 2)\n with serial.Serial(sys.argv[1], baudrate, timeout=timeout) as ser:\n print(\n '[Info]: Ready to receive the image; press any key to continue...')\n os.system('pause')\n command = 'i' + sys.argv[2] + sys.argv[3]\n ser.write(command.encode(encoding=\"utf-8\"))\n print(command)\n imgbuff = ser.read(nc * nr)\n print('RECEIVED {} BYTES'.format(len(imgbuff)))\n i = Image.frombytes(mode='L', size=(nc, nr), data=imgbuff)\n dt = datetime.now()\n i.save('DBG' + dt.strftime('%y%m%d%H%M%S') + '.jpg')\nexcept serial.SerialException as e:\n print(\n '[Error]: Serial port {} does not exist; use python -m serial.tools.list_ports to list the available ports.\\nExiting...'\n .format(sys.argv[1]))\n exit()\n","repo_name":"upcomputer/up1","sub_path":"img_recv.py","file_name":"img_recv.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"25932939441","text":"# Databricks notebook source\n# MAGIC %md ## Generating Audit Stats\n# MAGIC\n# MAGIC ### Description:\n# MAGIC This notebook performs sanity checks on the record counts in the input files and the output files.\n# MAGIC\n# MAGIC ### Requirements:\n# MAGIC\n# MAGIC
\n# MAGIC • The matched entity and raw data should be available at the storage layer.\n# MAGIC\n# MAGIC
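 A quick illustration of the left_anti join used in the Dropped Records section below (made-up IDs, not real data):\n# MAGIC\n# MAGIC ```\n# MAGIC raw = spark.createDataFrame([(1,), (2,), (3,)], [\"rcis_id\"])\n# MAGIC kept = spark.createDataFrame([(2,), (3,)], [\"rcis_id\"])\n# MAGIC # left_anti keeps rows of raw with no match in kept -> here only rcis_id 1 remains\n# MAGIC dropped = raw.join(kept, \"rcis_id\", \"left_anti\")\n# MAGIC ```\n# MAGIC\n# MAGIC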
    \n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Load the Datasets\n\n# COMMAND ----------\n\n# MAGIC %run ../2.Common/ADLSDesignedPaths\n\n# COMMAND ----------\n\nrogers_contact_df = spark.read.format(\"parquet\").load(rogers_contact)\nrogers_wireless_account_df = spark.read.format(\"parquet\").load(rogers_wireless_account)\nrogers_wireline_account_df = spark.read.format(\"parquet\").load(rogers_wireline_account)\nshaw_consumer_wireless_account_df = spark.read.format(\"parquet\").load(shaw_consumer_wireless_account)\nshaw_consumer_wireline_account_df = spark.read.format(\"parquet\").load(shaw_consumer_wireline_account)\nshaw_consumer_contact_df = spark.read.format(\"parquet\").load(shaw_consumer_contact)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Input records\n\n# COMMAND ----------\n\nr_combine = rogers_contact_df.select('x_rcis_id').unionAll(rogers_wireless_account_df.select('x_rcis_id')).unionAll(rogers_wireline_account_df.select('x_rcis_id')).distinct()\ns_combine = shaw_consumer_contact_df.select('rcis_id').unionAll(shaw_consumer_wireless_account_df.select('rcis_id')).unionAll(shaw_consumer_wireline_account_df.select('rcis_id')).distinct()\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Dropped Records\n\n# COMMAND ----------\n\nr_wls_c_joined = spark.read.format(\"parquet\").load(foundation_rogers_wls_c)\nr_wln_c_joined = spark.read.format(\"parquet\").load(foundation_rogers_wln_c)\ns_wls_c_joined = spark.read.format(\"parquet\").load(foundation_shaw_wls_c)\ns_wln_c_joined = spark.read.format(\"parquet\").load(foundation_shaw_wln_c)\n\n# COMMAND ----------\n\nr_df = r_wls_c_joined.unionByName(r_wln_c_joined).dropDuplicates(['r_rcis_id_cl'])\ns_df = s_wls_c_joined.unionByName(s_wln_c_joined).dropDuplicates(['s_rcis_id_cl'])\n\n# COMMAND ----------\n\nr_dropped = r_combine.join(r_df, r_combine.x_rcis_id == r_df.r_rcis_id_cl, how='left_anti')\ns_dropped = s_combine.join(s_df, s_combine.rcis_id == s_df.s_rcis_id_cl, how='left_anti')\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Matched & Unmatched Records\n\n# COMMAND ----------\n\nmatched_entity_df = spark.read.format(\"parquet\").load(matched_entity_final)\n\n# COMMAND ----------\n\n#unique customer\nr_matched = matched_entity_df.filter((col('ROGERS_ECID').isNotNull()) & (col('SHAW_MASTER_PARTY_ID').isNotNull())).dropDuplicates(['ROGERS_ECID'])\ns_matched = matched_entity_df.filter((col('ROGERS_ECID').isNotNull()) & (col('SHAW_MASTER_PARTY_ID').isNotNull())).dropDuplicates(['SHAW_MASTER_PARTY_ID'])\n\n# COMMAND ----------\n\ns_unmatched = matched_entity_df.filter((col('ROGERS_ECID').isNull()) & (col('SHAW_MASTER_PARTY_ID').isNotNull())).dropDuplicates(['SHAW_MASTER_PARTY_ID'])\nr_unmatched = matched_entity_df.filter((col('ROGERS_ECID').isNotNull()) & (col('SHAW_MASTER_PARTY_ID').isNull())).dropDuplicates(['ROGERS_ECID'])\n\n# COMMAND ----------\n\nr_date = rogers_contact_df.first()['snapshot_stamp']\ns_date = shaw_consumer_contact_df.first()['SNAPSHOT_STAMP']\n\nm_date = matched_entity_df.first()['MATCHED_TIMESTAMP']\n\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Generate Stats\n\n# COMMAND ----------\n\ncols = ['Source', 'CustRawData', 'CustDropped', 'CustMatched','CustUnmatched', 'InputSnapshotStamp', 'MatchedTimestamp']\nvals = [('ROGERS', r_combine.count(), r_dropped.count(), r_matched.count(), r_unmatched.count(), r_date , m_date),\n ('SHAW', s_combine.count(), s_dropped.count(), s_matched.count(), s_unmatched.count(), s_date , m_date)]\n\ndf = spark.createDataFrame(vals, cols)\n# display(df)\n\n# 
COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC\n# MAGIC\n# MAGIC\n# MAGIC\n# MAGIC\n# MAGIC ## Create and Store in DB\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC CREATE DATABASE IF NOT EXISTS AUDIT_SUMMARY;\n# MAGIC SHOW DATABASES\n\n# COMMAND ----------\n\ndf.createOrReplaceTempView('df')\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC USE AUDIT_SUMMARY;\n# MAGIC SELECT CURRENT_DATABASE();\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC \n# MAGIC CREATE TABLE IF NOT EXISTS audit_summary (\n# MAGIC Source STRING,\n# MAGIC CustRawData INT,\n# MAGIC CustDropped INT, \n# MAGIC CustMatched INT,\n# MAGIC CustUnmatched INT,\n# MAGIC InputSnapshotStamp DATE,\n# MAGIC MatchedTimestamp TIMESTAMP\n# MAGIC )\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC\n# MAGIC INSERT INTO audit_summary\n# MAGIC SELECT * FROM df\n\n# COMMAND ----------\n\n# MAGIC %sql\n# MAGIC\n# MAGIC select *\n# MAGIC from audit_summary\n","repo_name":"LukeZhang-Rogers/clean-room-consumer","sub_path":"(Clone) Consumer-Phase2-LD1_v1/9.Stats/AuditStats.py","file_name":"AuditStats.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"6908697234","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def pathSum(self, root: TreeNode, sum: int) -> List[List[int]]:\n if (root and not isinstance(root, TreeNode)) or not isinstance(sum, int):\n return None\n if not root:\n return []\n\n ret = []\n stack = [(root, [root.val])]\n while stack:\n cur_node, cur_path = stack.pop()\n left, right = cur_node.left, cur_node.right\n\n cur_sum = 0\n for item in cur_path:\n cur_sum += item\n\n if (cur_sum == sum) and (left is None and right is None):\n ret.append(cur_path)\n else:\n if right:\n stack.append((right, cur_path + [right.val]))\n if left:\n stack.append((left, cur_path + [left.val]))\n\n return ret","repo_name":"lofues/LeetCode-Excerise","sub_path":"113_路径总和2.py","file_name":"113_路径总和2.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"72982854052","text":"from django.shortcuts import render, redirect\nfrom django.http import JsonResponse\nfrom django.db.models import Q\nfrom django.views.decorators.http import require_GET\nfrom .models import Word, Vocab\nimport random\n\nQUIZ_LENGTH = 10\n\ndef home(request):\n return render(request, 'website/home.html', {})\n\n\n@require_GET\ndef search(request):\n query = request.GET.get('q', '')\n results = Vocab.objects.filter(Q(pinyin3__icontains=query) | Q(english1__icontains=query)).values()#[:10]\n html = render(request, 'website/search_results.html', {'results': results}).content.decode('utf-8')\n\n return JsonResponse({'html': html})\n\n\ndef character(request, char):\n word = Vocab.objects.filter(simplified=char).first()\n context = {\n 'character': char,\n 'word': word\n }\n\n return render(request, 'website/character.html', context)\n\n\ndef character_search(request):\n return render(request, 'website/character_search.html')\n\n\ndef dictionary(request):\n return render(request, 'website/dictionary.html', {})\n\n\ndef flashcards(request, level):\n words = list(Vocab.objects.filter(hsk_level=level).values())\n # print(\"WORDS: \", words[:5])\n words = sorted(words, key=lambda k: random.random())\n \n for word in words[:QUIZ_LENGTH]:\n ans = generate_answers(level, 
word)\n ans = random.sample(ans, k=len(ans))\n word[\"answer1\"] = ans[0]\n word[\"answer2\"] = ans[1]\n word[\"answer3\"] = ans[2]\n word[\"answer4\"] = ans[3]\n\n context = {\n 'words': words[:QUIZ_LENGTH],\n 'english': [word[\"english1\"], word[\"english2\"], word[\"english3\"]],\n 'level': level\n }\n\n return render(request, 'website/hsk_flashcards.html', context)\n\n\ndef wordslist(request, level):\n words = Vocab.objects.filter(hsk_level=level).order_by('id').values()\n context = {\n 'words': words,\n 'level': level\n }\n \n return render(request, 'website/hsk_wordlist.html', context)\n\n\ndef generate_answers(level, word):\n word_list = Vocab.objects.filter(~Q(simplified=word) & Q(hsk_level=level)).values()\n word_list = sorted(word_list, key=lambda k: random.random())\n\n answer_list = []\n answer_list.append((word['simplified'], word['english1']))\n answer_list.append((word_list[0]['simplified'], word_list[0]['english1']))\n answer_list.append((word_list[1]['simplified'], word_list[1]['english1']))\n answer_list.append((word_list[2]['simplified'], word_list[2]['english1']))\n\n return(answer_list)\n","repo_name":"zrogers010/mandarinflash","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"20328316208","text":"\"\"\"M/D/N queue (D=deterministic)\"\"\"\n\nimport random\nimport simpy\nimport numpy as np\nimport math\nfrom probabilities import *\nimport pandas as pd\nimport scipy.stats as st\n\ndef conf_int(mean, var, n, p=0.95):\n pnew = (p+1)/2\n zval = st.norm.ppf(pnew)\n sigma = math.sqrt(var)\n alambda = (zval*sigma)/math.sqrt(n)\n min_lambda = mean - alambda\n plus_lambda = mean + alambda\n return f\"Confidence interval: [{min_lambda:.4f} < X < {plus_lambda:.4f}] with p = {p}\"\n\n\nclass Queue(object):\n \"\"\"\n Create the initial object queue\n \"\"\"\n\n def __init__(self, env, servers, servicetime):\n self.env = env\n self.server = simpy.Resource(env, servers)\n self.servicetime = servicetime\n\n def service(self, customer):\n \"\"\"The process\"\"\"\n yield self.env.timeout(1/MU)\n\n\ndef customer(env, name, qu):\n \"\"\"Each customer has a ``name`` and requests a server\n Subsequently, it starts a process.\n need to do sthis differently though...\n \"\"\"\n\n global arrivals\n\n a = env.now\n # print(f'{name} arrives at the servicedesk at {a:.2f}')\n arrivals += 1\n\n with qu.server.request() as request:\n yield request\n\n global counter\n global waiting_time\n global leavers\n\n b = env.now\n # print('%s enters the servicedesk at %.2f.' % (name, b))\n waitingtime = (b - a)\n # print(f'{name} waiting time was {waitingtime:.2f}')\n waiting_time += waitingtime\n counter += 1\n\n yield env.process(qu.service(name))\n # print('%s leaves the servicedesk at %.2f.' % (name, env.now))\n leavers += 1\n\n\ndef setup(env, servers, servicetime, t_inter):\n \"\"\"Create a queue, a number of initial customers and keep creating customers\n approx. 
every 1/lambda*60 minutes.\"\"\"\n # Generate queue\n queue = Queue(env, SERVERS, MU)\n\n # Create 1 initial customer\n # for i in range(1):\n i = 0\n env.process(customer(env, f'Customer {i}', queue))\n\n # Create more customers while the simulation is running\n while True:\n yield env.timeout(np.random.exponential(1/LAMBDA, 1)[0])\n i += 1\n env.process(customer(env, f'Customer {i}', queue))\n\n\n\n\n# Setup and start the simulation\nprint('QUEUE SIMULATION\\n')\n\n### SETTINGS\nRHO = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.975]\nMU = 1 # 1/mu is exponential service times\nSERVERS = 4 #2,4\nSIMULATIONS = 500\ncolumn = ['RHO', 'SIM_TIME', 'AVG_WAIT']\ndata_sims = []\nSIM_TIME = 500 # simulation time in time units\n# print(f'Simulations: {SIMULATIONS}')\n\n\n# Simulations\nfor rho in RHO:\n\n LAMBDA = rho * (MU * SERVERS) # 1/lambda is exponential inter arrival times\n # Create dataframe to store important values to calculate statistics\n data = pd.DataFrame(columns=column)\n for s in range(SIMULATIONS):\n\n waiting_time = 0\n counter = 0\n arrivals = 0\n leavers = 0\n\n # Create an environment and start the setup process\n env = simpy.Environment()\n env.process(setup(env, SERVERS, MU, LAMBDA))\n\n # Execute the simulation\n env.run(until=SIM_TIME)\n\n rho = LAMBDA/(SERVERS*MU)\n avg_waiting = waiting_time/(counter)\n avg_arrivals = arrivals/SIM_TIME\n avg_leavers = leavers/SIM_TIME\n\n data.loc[s] = [rho, SIM_TIME, avg_waiting]\n\n data_sims.append(data)\n\nprint(pd.concat(data_sims))\n\npd.concat(data_sims).to_csv('data/MDN_4.txt', sep='\\t', index=False)","repo_name":"ccfelius/queueing","sub_path":"MDN_data.py","file_name":"MDN_data.py","file_ext":"py","file_size_in_byte":3463,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"9"} +{"seq_id":"70653046373","text":"import os\nimport json\nimport asyncio\nimport logging\nimport socket\nimport random\nimport time\nimport threading\n\n\nclass TemperatureProducerTcp(object):\n\n def __init__(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.ip = 'localhost'\n self.port = 50000\n\n def get_temp(self) -> int:\n return random.randint(0, 35)\n\n def run(self):\n self.socket.bind((self.ip, self.port))\n self.socket.listen(1)\n logging.info('Listening on {}:{}'.format(self.ip, self.port))\n\n while True:\n conn, addr = self.socket.accept()\n # event_loop.create_task(self.handle_connection(conn, addr))\n logging.info('Received connection from {}'.format(addr))\n t = threading.Thread(target=self.worker, args=(conn, addr))\n t.start()\n\n def worker(self, conn, addr):\n logging.info('Worker started')\n asyncio.set_event_loop(asyncio.new_event_loop())\n event_loop = asyncio.get_event_loop()\n event_loop.run_until_complete(self.handle_connection(conn, addr))\n\n async def handle_connection(self, conn, addr):\n logging.info('Accepted connection from {}'.format(addr))\n while True:\n data_dict = {\n 'temp': self.get_temp(),\n 'location': 'Bedroom',\n 'timestamp': int(time.time())\n }\n conn.sendall(str.encode(json.dumps(data_dict)))\n await asyncio.sleep(1)\n conn.close()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(format='[%(asctime)s]%(levelname)s:%(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO)\n\n p = TemperatureProducerTcp()\n\n
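 # run() blocks on socket.accept() and is not a coroutine, so it is invoked\n # directly; scheduling it with event_loop.create_task() would raise a TypeError\n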
 p.run()\n\n","repo_name":"gujianxiao/NDNMachineLearning","sub_path":"temperature_producer_tcp.py","file_name":"temperature_producer_tcp.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"20553186919","text":"import pymongo\n\nclient = pymongo.MongoClient(\n \"mongodb://admin:admin@ds021182.mlab.com:21182/c4e\")\n\ndb = client.c4e\n\ndb.posts.insert_one({\n \"title\":\"Đây là một cái title trống nhé\",\n \"author\":\"Awkward Bunny\",\n \"content\":\"Make it simple but significant\"\n})","repo_name":"awkwardbunny97/DangQuangAnh-Web-C4E32","sub_path":"Session03/Homework/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"41562643752","text":"# file=open(\"055 fruits.txt\",'r')\n# content = file.readlines()\n# file.close()\n# for i in content:\n# print(i)\n\ntemperatures=[10,-20,-289,100]\ndef c_to_f(c):\n if c>= -273.15:\n f=c*9/5+32\n f=str(f)\n with open('066.txt','a') as test:\n test.write('\\n'+f)\n\nfor t in temperatures:\n c_to_f(t)\n","repo_name":"noodlexpoodle/PY","sub_path":"10AppCourse/f2.py","file_name":"f2.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"36013437289","text":"from gensim.models.doc2vec import TaggedDocument\nfrom gensim.models import Doc2Vec\nfrom gensim.test.utils import get_tmpfile\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import svm\nfrom sklearn.preprocessing import LabelEncoder\nimport argparse\nimport joblib\nfrom dask.distributed import Client\nimport os\n\ndef process_csv(csv_filename):\n import pandas as pd\n\n df = pd.read_csv(csv_filename)\n\n df[df.isnull().any(axis=1)]\n df.drop(df[df.isnull().any(axis=1)].index, inplace=True)\n\n return df\n\ndef train_d2v_model(model, labelled_ngrams, n_epochs):\n\n model.build_vocab(labelled_ngrams)\n\n for epoch in range(n_epochs):\n model.train(labelled_ngrams, epochs=model.epochs,\n total_examples=model.corpus_count)\n print(\"Epoch #{} is complete.\".format(epoch+1))\n\n return model\n\ndef save_d2v_model(model, fpath):\n from gensim.test.utils import get_tmpfile\n\n fname = get_tmpfile(fpath)\n model.save(fname)\n\ndef load_d2v_model(fpath):\n from gensim.test.utils import get_tmpfile\n\n fname = get_tmpfile(fpath)\n return Doc2Vec.load(fname)\n\ndef save_clf(clf, fpath):\n import joblib\n\n joblib.dump(clf, fpath)\n\ndef load_clf(fpath):\n\n import joblib\n\n return joblib.load(fpath)\n\ndef print_stats(y_pred, y_test):\n from sklearn import metrics\n\n print(\"Accuracy = \" + str(metrics.accuracy_score(y_test, y_pred)))\n print(\"Precision = \" + str(metrics.precision_score(y_test, y_pred)))\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"machine learning model\")\n parser.add_argument('-c', '--csv_filename', default=None,\n help='filename for the csv dataset')\n parser.add_argument('-d', '--debug', action='store_true',\n help='turn on debug statements')\n\n args = parser.parse_args()\n\n if args.csv_filename:\n csv_filename = args.csv_filename\n else:\n parser.print_help()\n exit()\n if args.debug:\n debug = True\n else:\n debug = False\n\n if debug:\n print(\"Starting to read CSV\")\n df = process_csv(csv_filename)\n\n if debug:\n print(\"Read CSV ---> DONE!\")\n\n try:\n test_num = 
args.csv_filename.rsplit(\"/\")[-1].rsplit(\"_\")[0]\n except:\n test_num = 0\n\n if debug:\n print(\"Starting to process ngrams\")\n\n ngrams = df['ngram']\n labelled_ngrams = []\n for i in range(len(ngrams)):\n labelled_ngrams.append(TaggedDocument(ngrams[i].split(), [i]))\n\n if debug:\n print(\"Process ngrams ---> DONE!\")\n print(labelled_ngrams)\n if debug:\n print(\"Creating model\")\n model = Doc2Vec(dm=1, min_count=1, window=10, vector_size=150,\n sample=1e-4, negative=10)\n if debug:\n print(\"Model --> CREATED!\")\n if debug:\n print(\"Training model\")\n trained_model = train_d2v_model(model, labelled_ngrams, n_epochs=20)\n if debug:\n print(\"Model --> TRAINED!\")\n\n d2v_path = os.path.join(os.getcwd(), \"doc2vec_model{}\".format(test_num))\n save_d2v_model(trained_model, d2v_path)\n\n model_loaded = load_d2v_model(d2v_path)\n\n le = LabelEncoder()\n le.fit([\"Benign\", \"Malware\"])\n target = le.transform(df['label'])\n\n data = []\n for i in range(len(df['ngram'])):\n data.append(model_loaded[i])\n\n x_train, x_test, y_train, y_test = train_test_split(data, target,\n test_size=0.3, random_state=0)\n client = Client(processes=False)\n\n clf = svm.SVC(kernel='linear')\n\n with joblib.parallel_backend('dask'):\n clf.fit(x_train, y_train)\n\n\n y_pred = clf.predict(x_test)\n\n clf_path = \"/home/amanjain/NLP-Malware/ml/model{}.pkl\".format(test_num)\n save_clf(clf, clf_path)\n\n print(y_pred)\n print_stats(y_pred, y_test)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"aman863/NLP-Malware","sub_path":"ml/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"852164753","text":"import re\n\nclass Scanner(object):\n def __init__(self, PythonWordTokens_re, PythonSymbols_re, PythonSymbols_eq, quote_regexp, WhiteSpace, comment_regexp, number_regexp, symbolicnames, eq_symbolicnames, ws_names):\n \"\"\"Takes a list of tuples and configures the scanner.\"\"\"\n # Initializes the pattern recognition engine for the scanner\n self.PythonWordTokens_re = PythonWordTokens_re\n self.PythonSymbols_re = PythonSymbols_re\n self.PythonSymbols_eq = PythonSymbols_eq\n self.quote_regexp = quote_regexp\n self.WhiteSpace = WhiteSpace\n self.comment_regexp = comment_regexp\n self.number_regexp = number_regexp\n self.symbolicnames = symbolicnames\n self.eq_symbolicnames = eq_symbolicnames\n self.ws_names = ws_names\n\n def scan(self, string):\n \"\"\"Takes a string and runs the scan on it, creating a list of tokens for later. 
You should keep this string around for people to access later.\"\"\"\n Scanned = [] # holds the output in the form of a list of tuples each tuple being (pattern, token)\n hold = string # Debug check 2\n falss = \"\" # Debug check 1\n Debug_print = False # Debug check 3\n\n while string != \"\":\n falss = string # Debug check 1\n \n # RECOGNITION FOR QUOTES\n if re.findall(self.quote_regexp, string):\n if Debug_print:\n print(\"some string\")\n match = re.findall(self.quote_regexp, string)[0]\n string_capture = match[0] + match[1]\n Debug_animation = [string]\n string = re.sub(self.quote_regexp, \"\", string)\n\n # FSTRING RECOGNITION\n if match[0] == \"f\":\n TokenName = 'FSTRING'\n else:\n # REGULAR STRING RECOGNITION\n TokenName = \"STRING\"\n while string[0:len(match[1])] != match[1]:\n if string[0] == \"\\\\\":\n string_capture += string[0:2]\n string = string[2:]\n else:\n string_capture += string[0]\n string = string[1:]\n Debug_animation.append(string)\n \n string_capture += match[1]\n string = string[len(match[1]):]\n Found = True\n Scanned.append((string_capture, TokenName))\n\n # KEYWORD OR PYTHON KEYWORD RECOGNITION\n elif re.findall(\"^[a-zA-Z_][a-zA-Z0-9_]*\", string):\n keyword = re.findall(\"^([a-zA-Z_][a-zA-Z0-9_]*)\", string)[0]\n if keyword in self.PythonWordTokens_re.split():\n Scanned.append((keyword, keyword.upper()))\n else:\n # Wasn't a python keyword so it must be a user defined keyword\n Scanned.append((keyword, \"KEYWORD\"))\n string = re.sub(\"^[a-zA-Z_][a-zA-Z0-9_]*\", \"\", string)\n\n else:\n # if it is not a keyword of any kind then it comes to this reg exp section\n # in which finding it in one of these loops prevents it from being searched\n # in the following loops\n Found = False\n\n # SYMBOL RECOGNITION\n for i in range(len(self.PythonSymbols_re.split())):\n if re.findall(f\"^{self.PythonSymbols_re.split()[i]}\", string):\n Scanned.append((re.findall(f\"^{self.PythonSymbols_re.split()[i]}\", string)[0], self.symbolicnames.split()[i]))\n string = re.sub(f\"^{self.PythonSymbols_re.split()[i]}\", \"\", string)\n Found = True\n break\n\n # RECOGNITION FOR SYMBOLS NOT HANDELABLE BY REGULAR EXPRESSIONS\n if not Found:\n for i in range(len(self.PythonSymbols_eq.split())):\n symbol, name = self.PythonSymbols_eq.split()[i], self.eq_symbolicnames.split()[i]\n if string[0:len(symbol)] == symbol:\n if Debug_print:\n print(\"non-regexp symbol\")\n Scanned.append((symbol, name))\n string = string[len(symbol):]\n Found = True\n break\n\n # RECOGNITION FOR WHITE SPACE\n if not Found:\n index = -1\n for i in self.WhiteSpace.split(\"|\"):\n index += 1\n if re.findall(f\"^{i}\", string):\n if Debug_print:\n print(\"white space\")\n Scanned.append((re.findall(f\"^{i}\", string)[0], self.ws_names.split()[index]))\n string = re.sub(f\"^{i}\", \"\", string)\n Found = True\n break\n\n # NUMBER RECOGNITION\n if not Found:\n for i in self.number_regexp:\n if re.findall(i, string):\n if Debug_print:\n print('number')\n Scanned.append((re.findall(i, string)[0][0], \"NUMBER\"))\n string = re.sub(i, \"\", string)\n Found = True\n break\n\n # COMMENT RECOGNITION\n if not Found:\n if re.findall(self.comment_regexp, string):\n if Debug_print:\n print(\"comment\")\n Scanned.append((re.findall(self.comment_regexp, string)[0], \"COMMENT\"))\n string = re.sub(self.comment_regexp, \"\", string)\n Found = True\n\n if falss == string: # Debug check 1\n # if falss == string then string hasn't changed which means\n # no pattern recognizes the start of string so string hasn't been\n # altered,\n 
# this will result in the while endlessly looping so the break down\n # below will prevent that\n print(\"Possible scenario not having associated regexp: \") # Debug check 1\n if Debug_print:\n print(f\"__{string}__\")\n break # Debug check 1\n\n return Scanned\n\n\n \n def match(self, TupleSyntaxFromScannedInput, Token):\n \"\"\"Given a list of possible tokens (these tokens come from a scanned string), and a specific token to match against, return the first token in the given list and remove it, otherwise return None\"\"\"\n if TupleSyntaxFromScannedInput[0][1] == Token:\n return TupleSyntaxFromScannedInput.pop(0)\n\n def peek(self, TupleSyntaxFromScannedInput, Token):\n \"\"\"Given a list of possible tokens (these tokens come from a scanned string), and a specific token to match against, return the first token in the given list, otherwise return None\"\"\"\n if TupleSyntaxFromScannedInput[0][1] == Token:\n return TupleSyntaxFromScannedInput[0]\n\n def push(self, TupleSyntaxFromScannedInput, Token): \n \"\"\"Push a token back on the token stream so that a later peek or match will return it.\"\"\"\n TupleSyntaxFromScannedInput.append(Token)\n\n\n\n\n\n\nPythonWordTokens_re = \"from import global class object __init__ def if True False try except pass break next last and print or return else elif as\"\nPythonSymbols_re = \"\\. != <= >= == \\+= -= < > = \\[ \\] \\{ \\} \\( \\) \\+ : , \\|\"\nPythonSymbols_eq = \"\\\\\"\nquote_regexp = \"^(f?)('''|\\\"\\\"\\\"|'|\\\")\"\nWhiteSpace = \"\\n| |[ ]+\"\ncomment_regexp = \"^(#.*)\"\nnumber_regexp = [\"^(-?([1-9]|\\.)\\d+(e\\d+|e-\\d+)?)\", \"^(-?[1-9]\\d+\\.?\\d*(e\\d+|e-\\d+)?)\", \"^(-?[0-9](\\.\\d*)?(e\\d+|e-\\d+)?)\"]\nTokenNamesForSymbols = \"DOT NOTEQ SMALL_OR_EQ GREAT_OR_EQ TWICEEQ PLUSEQ MINUSEQ LESS GREAT EQ LBRACK RBRACK LCBRACK RCBRACK LPAREN RPAREN PLUS COLON COMMA BAR\"\neq_SymbolNames = \"ESCAPE\"\nWhiteSpaceNames = \"NEWLINE INDENT WS\"\n\n\n\n\n# Opening a file down here and getting the contents\nhandle1 = open(\"?????\")\nfile_contents = handle1.read()\nhandle1.close()\n\n# Initializing the scanner\nscanner = Scanner(PythonWordTokens_re, PythonSymbols_re, PythonSymbols_eq, quote_regexp, WhiteSpace, comment_regexp, number_regexp, TokenNamesForSymbols, eq_SymbolNames, WhiteSpaceNames)\nRigid_Tuple_List = scanner.scan(file_contents)\n","repo_name":"EloMalakhi/Python-code-scanner","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":8538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"11785984572","text":"from sanic_jwt.decorators import protected, inject_user\nfrom sanic_ext import validate, openapi\nfrom sanic import json\nfrom sanic.request import Request\nfrom sanic.response import HTTPResponse\nfrom src.services.goods_services import (\n create_good_instance,\n delete_good_instance,\n update_good_instance,\n)\nfrom src.shemas.goods_shemas import CommodityCreate, CommodityUpdate\nfrom src.decorators import validate_admin_decorator, validate_exceptions\nfrom src.endpoints.admin import admin_bp as admin_goods_bp\n\n\n@admin_goods_bp.route(\"/goods\", methods=[\"POST\"])\n@openapi.definition(\n body={\"application/json\": CommodityCreate.schema()},\n summary=\"Create good\",\n)\n@inject_user()\n@protected()\n@validate_admin_decorator()\n@validate_exceptions()\n@validate(json=CommodityCreate)\nasync def create_good(\n request: Request, user: dict[str, str | int], body: CommodityCreate\n) -> HTTPResponse:\n good_id = await 
create_good_instance(body)\n return json({\"message\": \"good successfully created\", \"good_id\": good_id}, status=201)\n\n\n@admin_goods_bp.route(\"/goods/<good_id:int>\", methods=[\"PATCH\"])\n@openapi.definition(\n body={\"application/json\": CommodityUpdate.schema()},\n summary=\"Update good data\",\n)\n@inject_user()\n@protected()\n@validate_admin_decorator()\n@validate_exceptions()\n@validate(json=CommodityUpdate)\nasync def update_good(\n request: Request, user: dict[str, str | int], body: CommodityUpdate, good_id: int\n) -> HTTPResponse:\n await update_good_instance(good_id, body)\n return json(\"\", status=204)\n\n\n@admin_goods_bp.route(\"/goods/<good_id:int>\", methods=[\"DELETE\"])\n@inject_user()\n@protected()\n@validate_admin_decorator()\nasync def delete_good(\n request: Request, user: dict[str, str | int], good_id: int\n) -> HTTPResponse:\n await delete_good_instance(good_id)\n return json(\"\", status=204)\n","repo_name":"Kimiyori/test_project","sub_path":"src/endpoints/goods_endpoints/admin_good_endpoints.py","file_name":"admin_good_endpoints.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"43198658546","text":"import subprocess\nimport argparse\nimport os\n\ndef run_command(bash_command):\n process = subprocess.Popen(bash_command.split())\n output, error = process.communicate()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--config_script',\n type=str,\n default='run_config.json',\n help='This config should mimic the config.py config json with parameters you want to override.'\n 'You can also override the parameters from config_script by passing them in directly after config_script. E.g., --train_config.batch_size 5')\n parser.add_argument('--mode',\n type=str,\n default='train',\n choices=[\"train\", \"eval\", \"dump_preds\", \"dump_embs\"])\n parser.add_argument('--base_dir',\n type=str,\n default='')\n parser.add_argument('--experiment_name',\n type=str,\n default='test')\n parser.add_argument('--tensorboard_dir',\n type=str,\n default='')\n args = parser.parse_args()\n print(\"RUN python setup.py develop\")\n run_command(\"python setup.py develop\")\n if not os.path.exists(args.tensorboard_dir):\n os.mkdir(args.tensorboard_dir)\n cmd = f\"python bootleg/run.py --config_script {args.config_script} --mode {args.mode} --base_dir {args.base_dir} --experiment_name {args.experiment_name} --tensorboard_dir {args.tensorboard_dir}\"\n print(\"RUN {}\".format(cmd))\n run_command(cmd)","repo_name":"microsoft/KC","sub_path":"papers/ReTraCk/retriever/diff/bootleg/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"9"} +{"seq_id":"18538395670","text":"# selenium can only handle HTML\n# pop-ups are generally implemented in Java or JavaScript, so they need a different method than the HTML one\n# first we use the alert switch_to method\n\nfrom selenium import webdriver\n\nvalidateText = \"hello\" # sample text to type into the field (defined here so the demo runs)\ndriver = webdriver.Chrome(executable_path=\"C:\\\\chromedriver.exe\")\ndriver.get(\"https://www.lcwaikiki.com/tr-TR/TR\")\n\ndriver.find_element_by_css_selector(\"#name\").send_keys(validateText)\ndriver.find_element_by_id(\"alertbtn\").click()\nalert = driver.switch_to.alert\nalertText = alert.text\nassert validateText in 
alertText\nalert.accept()\n\n# alert.dismiss() # alternative to accept(); a given alert can only be handled once\n","repo_name":"alpayalyn/seleniumwithPython","sub_path":"basicsofSelenium/Alerts.py","file_name":"Alerts.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"3091689453","text":"#!/usr/bin/env python3\n\nimport itertools\nimport operator\nimport numpy as np\nimport math\nimport string\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nsquare = [[ 1, 5, 27, 22, 28, 40, 14],\n [39, 13, 17, 30, 41, 12, 2],\n [32, 35, 24, 25, 19, 47, 34],\n [16, 33, 10, 42, 7, 44, 18],\n [ 3, 8, 45, 37, 4, 21, 20],\n [15, 46, 38, 6, 26, 48, 49],\n [ 9, 23, 31, 29, 11, 36, 43]]\nN_COLS = len(square[0])\nN_ROWS = len(square)\n#data = np.matrix(square)\n\nclass Well:\n def __init__(self):\n self.time = 0\n self.fr = 1 #flow rate\n self.G = nx.DiGraph()\n self.init_node = (0, 0)\n # set up nodes\n for r in range(N_ROWS):\n for c in range(N_COLS):\n self.G.add_node((r, c), depth=square[r][c])\n\n # vertical edges\n for r in range(N_ROWS - 1):\n for c in range(N_COLS):\n delta = self.G.nodes[(r + 1, c)][\"depth\"] - self.G.nodes[(r, c)][\"depth\"]\n if delta > 0:\n self.G.add_edge((r, c), (r + 1, c), borders=1, capacity=delta)\n else:\n self.G.add_edge((r + 1, c), (r, c), borders=1, capacity=-delta)\n\n # horizontal edges\n for r in range(N_ROWS):\n for c in range(N_COLS - 1):\n delta = self.G.nodes[(r, c + 1)][\"depth\"] - self.G.nodes[(r, c)][\"depth\"]\n if delta > 0:\n self.G.add_edge((r, c), (r, c + 1), borders=1, capacity=delta)\n else:\n self.G.add_edge((r, c + 1), (r, c), borders=1, capacity=-delta)\n\n def isSink(self, node):\n # a sink has no successors and receives positive flow from the initial node\n for _ in self.G.successors(node):\n return False\n max_flow_value, _ = nx.maximum_flow(self.G, self.init_node, node) # returns (flow_value, flow_dict)\n if max_flow_value > 0:\n return True\n else:\n return False\n\n def mergeNodes(self, host, child):\n child_only = []\n both = []\n for _ in self.G.successors(child):\n raise RuntimeError(\"YOU FAIL\")\n for n in self.G.predecessors(child):\n child_only.append(n)\n child_only.remove(host)\n for n in self.G.predecessors(host):\n pass\n#plt.subplot(121)\n#nx.draw_spectral(G, with_labels=True, font_weight='bold')\n#plt.show()","repo_name":"joeda/jnstrt","sub_path":"well_well_well/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"27678135955","text":"import streamlit as st\nfrom skimage import io\nfrom io import BytesIO\nimport os\nimport cv2 as cv\nimport streamlit.components.v1 as components\nimport time\nimport tensorflow as tf\nimport easyocr\nimport tensorflow_hub as hub\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom PIL import ImageColor, ImageDraw, ImageFont, ImageOps, Image\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as viz_utils\nfrom object_detection.builders import model_builder\nfrom object_detection.utils import config_util\nfrom scipy.ndimage import interpolation as inter\nimport glob\nimport pytesseract as pt\n\ndef img_resize(im, size):\n desired_size = size\n old_size = im.shape[:2] # old_size is in (height, width) format\n\n ratio = desired_size/max(old_size)\n new_size = tuple([int(x*ratio) for x in old_size])\n\n im = cv.resize(im, (new_size[1], new_size[0]))\n\n delta_w = desired_size - new_size[1]\n delta_h = desired_size - 
new_size[0]\n top, bottom = delta_h//2, delta_h-(delta_h//2)\n left, right = delta_w//2, delta_w-(delta_w//2)\n\n color = [0, 0, 0]\n new_im = cv.copyMakeBorder(im, top, bottom, left, right, cv.BORDER_CONSTANT,value=color)\n return new_im, top, left\n\n\ninterpreter = tf.lite.Interpreter(model_path='../data/models/model.tflite')\ninterpreter.allocate_tensors()\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n_, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']\ncategory_index = label_map_util.create_category_index_from_labelmap('../data/models/label_map.pbtxt')\n\n\nreader = easyocr.Reader(['en'])\n\ndef detect_fn(image):\n image, shapes = detection_model.preprocess(image)\n prediction_dict = detection_model.predict(image, shapes)\n detections = detection_model.postprocess(prediction_dict, shapes)\n return detections\n\n\ndef correct_skew(image, delta=1, limit=5):\n def determine_score(arr, angle):\n data = inter.rotate(arr, angle, reshape=False, order=0)\n histogram = np.sum(data, axis=1)\n score = np.sum((histogram[1:] - histogram[:-1]) ** 2)\n return histogram, score\n\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)[1]\n\n scores = []\n angles = np.arange(-limit, limit + delta, delta)\n for angle in angles:\n histogram, score = determine_score(thresh, angle)\n scores.append(score)\n\n best_angle = angles[scores.index(max(scores))]\n\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv.getRotationMatrix2D(center, best_angle, 1.0)\n rotated = cv.warpAffine(image, M, (w, h), flags=cv.INTER_CUBIC, \\\n borderMode=cv.BORDER_REPLICATE)\n return best_angle, rotated\n\n# @st.cache\ndef read_files():\n# data = os.listdir('../../data/Label_image_800/')\n data = os.listdir('../data/raw/')\n data.sort()\n snaps = os.listdir('../data/snaps/')\n snaps.sort()\n return data,snaps\n\ndef st_our_model_video_tflite():\n select_min_score = st.slider('Min Score of Detecting %',min_value = 10, max_value = 100, value=20, step = 1)\n if st.button(\"Run Detector\"):\n with st.spinner('Detecting ...'):\n start_t = time.time()\n cap = cv.VideoCapture('test_video.mp4')\n frame_width = int(cap.get(3))\n frame_height = int(cap.get(4))\n out = cv.VideoWriter('detected_plate_tflite.avi',cv.VideoWriter_fourcc('M','J','P','G'), 10, (frame_width,frame_height))\n if (cap.isOpened()== False):\n print(\"Error opening video stream or file\")\n while(cap.isOpened()):\n ret, real_img = cap.read()\n if ret == True:\n img = Image.fromarray(real_img.copy())\n img = img.resize((320, 320), Image.ANTIALIAS)\n real_img = np.array(real_img)\n img = np.array(img)\n top = 0\n left = 0\n image_np = img.copy()\n input_tensor = np.array(np.expand_dims(img,0), dtype=np.float32)\n input_index = interpreter.get_input_details()[0][\"index\"]\n interpreter.allocate_tensors()\n interpreter.set_tensor(input_index, input_tensor)\n interpreter.invoke()\n output_details1 = interpreter.get_output_details()[0]\n output_details2 = interpreter.get_output_details()[1]\n output_details3 = interpreter.get_output_details()[3]\n detection_scores = np.squeeze(interpreter.get_tensor(output_details1['index']))\n detection_boxes = np.squeeze(interpreter.get_tensor(output_details2['index']))\n detection_classes = np.squeeze(interpreter.get_tensor(output_details3['index'])).astype(np.int64)\n\n res_det = np.where(detection_scores>select_min_score/100)\n\n label_id_offset = 1\n image = img.copy()\n imup 
= []\n plate_scores = []\n ind = 0\n (im_height,im_width,ch) = real_img.shape\n detection_boxes[:,1] += left/im_width\n detection_boxes[:,3] += left*2/im_width\n detection_boxes[:,0] += top/im_height\n detection_boxes[:,2] += top*2/im_height\n ymin, xmin, ymax, xmax= detection_boxes[0]\n bbox = np.array([xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height]).astype(int)\n max_center = bbox[1]-bbox[0] + bbox[3]-bbox[2]\n max_score = 0\n for i in res_det[0]:\n ymin, xmin, ymax, xmax = detection_boxes[i]\n bbox = np.array([xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height]).astype(int)\n center = bbox[1]-bbox[0] + bbox[3]-bbox[2]\n if detection_scores[i] > max_score:\n max_score = detection_scores[i]\n max_center = center\n if np.linalg.norm(max_center - center) != 0 and np.linalg.norm(max_center - center) < 120:\n detection_boxes = np.delete(detection_boxes, i, 0)\n detection_classes = np.delete(detection_classes, i, 0)\n detection_scores = np.delete(detection_scores, i, 0)\n continue\n plate = real_img[bbox[2]:bbox[3],bbox[0]:bbox[1]].copy()\n best_angle,plate=correct_skew(plate, delta=1, limit=25)\n imup.append(plate)\n plate_scores.append(detection_scores[i])\n if len(imup) > 0:\n viz_utils.visualize_boxes_and_labels_on_image_array(\n real_img,\n detection_boxes,\n detection_classes+label_id_offset,\n detection_scores,\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=5,\n line_thickness=10,\n min_score_thresh=(select_min_score/100),\n agnostic_mode=False)\n out.write(real_img)\n if cv.waitKey(25) & 0xFF == ord('q'):\n break\n else:\n break\n\n cap.release()\n cv.destroyAllWindows()\n end_t = time.time()\n st.markdown(f\"Time used for detecting {round(end_t-start_t, 1)} s\")\n\ndef st_our_model_tflite():\n data,snaps=read_files()\n if st.checkbox(\"snaps\"):\n select_image = st.selectbox(\"Choose a picture\",options = snaps,index = 0, key = 1)\n path = '../data/snaps/'\n else:\n select_image = st.selectbox(\"Choose a picture\",options = data,index = 0, key = 1)\n path = '../data/raw/'\n select_min_score = st.slider('Min Score of Detecting %',min_value = 10, max_value = 100, value=20, step = 1)\n st.header(\"Example of detected image\")\n uploaded_file = st.file_uploader(\"Choose a file for detecting image\",[\"jpg\",\"png\"])\n col1, col2 = st.columns(2)\n col1.header(\"Image\")\n if isinstance(uploaded_file,BytesIO):\n if snaps:\n st.write(snaps)\n count = int(snaps[-1].split(\".\")[0].split('_')[1])+1\n else:\n count = int(data[-1].split(\".\")[0].split('_')[1])+1\n img = Image.open(uploaded_file)\n img = cv.cvtColor(np.array(img),cv.COLOR_BGR2RGB)\n path_write = \"../../data/snaps/car_\"+(5-len(str(count)))*\"0\"+str(count)+\".jpg\"\n col1.image(uploaded_file)\n cv.imwrite(path_write,img)\n det_img = path_write\n else:\n img = cv.imread(path+select_image)\n img = cv.cvtColor(img,cv.COLOR_BGR2RGB)\n col1.image(img,use_column_width=True)\n det_img = path+select_image\n if st.button(\"Run Detector\"):\n with st.spinner('Detecting ...'):\n start_t = time.time()\n IMAGE_PATH=det_img\n real_img = Image.open(IMAGE_PATH)\n img = real_img.copy()\n img = cv.resize(np.array(img), (320, 320), interpolation = cv.INTER_NEAREST)\n real_img = np.array(real_img)\n img = np.array(img)\n top = 0\n left = 0\n# img, top, left = np.array(img_resize(real_img,320))\n image_np = img.copy()\n input_tensor = np.array(np.expand_dims(img,0), dtype=np.float32)\n input_index = interpreter.get_input_details()[0][\"index\"]\n 
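# run TFLite inference: allocate tensors, bind the resized image to the input,\n # invoke, then unpack the output tensors (scores, boxes, classes) below\n 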
interpreter.allocate_tensors()\n interpreter.set_tensor(input_index, input_tensor)\n interpreter.invoke()\n output_details1 = interpreter.get_output_details()[0]\n output_details2 = interpreter.get_output_details()[1]\n output_details3 = interpreter.get_output_details()[3]\n detection_scores = np.squeeze(interpreter.get_tensor(output_details1['index']))\n detection_boxes = np.squeeze(interpreter.get_tensor(output_details2['index']))\n detection_classes = np.squeeze(interpreter.get_tensor(output_details3['index'])).astype(np.int64)\n\n res_det = np.where(detection_scores>select_min_score/100)\n\n if len(res_det[0]) > 0:\n label_id_offset = 1\n image = img.copy()\n imup = []\n plate_scores = []\n ind = 0\n (im_height,im_width,ch) = real_img.shape\n detection_boxes[:,1] += left/im_width\n detection_boxes[:,3] += left*2/im_width\n detection_boxes[:,0] += top/im_height\n detection_boxes[:,2] += top*2/im_height\n ymin, xmin, ymax, xmax= detection_boxes[0]\n bbox = np.array([xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height]).astype(int)\n max_center = bbox[1]-bbox[0] + bbox[3]-bbox[2]\n max_score = 0\n for i in res_det[0]:\n ymin, xmin, ymax, xmax = detection_boxes[i]\n bbox = np.array([xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height]).astype(int)\n center = bbox[1]-bbox[0] + bbox[3]-bbox[2]\n if detection_scores[i] > max_score:\n max_score = detection_scores[i]\n max_center = center\n if np.linalg.norm(max_center - center) != 0 and np.linalg.norm(max_center - center) < 120:\n detection_boxes = np.delete(detection_boxes, i, 0)\n detection_classes = np.delete(detection_classes, i, 0)\n detection_scores = np.delete(detection_scores, i, 0)\n continue\n plate = real_img[bbox[2]:bbox[3],bbox[0]:bbox[1]].copy()\n best_angle,plate=correct_skew(plate, delta=1, limit=25)\n imup.append(plate)\n plate_scores.append(detection_scores[i])\n if len(imup) > 0:\n viz_utils.visualize_boxes_and_labels_on_image_array(\n real_img,\n detection_boxes,\n detection_classes+label_id_offset,\n detection_scores,\n category_index,\n use_normalized_coordinates=True,\n max_boxes_to_draw=5,\n line_thickness=10,\n min_score_thresh=(select_min_score/100),\n agnostic_mode=False)\n if len(imup) > 0:\n col2.header(\"Detected Image\")\n else:\n col2.header(\"Don't detect plate\")\n col2.image(real_img)\n end_t = time.time()\n st.markdown(f\"Time used for detecting {round(end_t-start_t, 1)} s\")\n if len(res_det[0]) > 0:\n if len(imup) > 0:\n st.header(\"Detections\")\n col_pl1, col_pl2 = st.columns(2)\n st.header(\"Detected Text\")\n col_tx1, col_tx2 = st.columns(2)\n for i in range(len(imup)):\n cur_plate = imup[i]\n platetext = reader.readtext(cur_plate)\n platetext_pytes = pt.image_to_string(cur_plate)\n if i % 2 == 0:\n col_pl1.image(cur_plate, width = 300)\n col_pl1.write(\"This is plate by \" + str(round(plate_scores[i]*100)) +\" %\")\n col_pl1.write(\"Pytesseract detect \" + platetext_pytes)\n for ind, text in enumerate(platetext):\n col_tx1.write(str(ind+1) + \". \" + text[1])\n else:\n col_pl2.image(cur_plate, width = 300)\n col_pl2.write(\"This is plate by \" + str(round(plate_scores[i]*100)) +\" %\")\n col_pl2.write(\"Pytesseract detect \" + platetext_pytes)\n for ind, text in enumerate(platetext):\n col_tx2.write(str(ind+1) + \". 
\" + text[1])\n\n","repo_name":"Sergey9123/Armenian-Cars-License-plate-detection","sub_path":"src/python_files/our_model_tflite.py","file_name":"our_model_tflite.py","file_ext":"py","file_size_in_byte":14728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"42998665281","text":"from PIL import Image\r\nimport io\r\nimport math\r\n\r\n\r\n\r\ndef compareColor(rgb1, rgb2):\r\n #out = abs((rgb1[0] - rgb2[0]) + abs(rgb1[1] - rgb2[1]) + abs(rgb1[2] - rgb2[2]))\r\n #out = math.sqrt((rgb1[0] - rgb2[0])**2 + (rgb1[1] - rgb2[1])**2 + (rgb1[2] - rgb2[2])**2)\r\n out = math.sqrt(((rgb1[0] - rgb2[0])*.3)**2 + ((rgb1[1] - rgb2[1])*.59)**2 + ((rgb1[2] - rgb2[2])*.11)**2)\r\n #out = abs((rgb1[0] - rgb2[0])*.3 + abs(rgb1[1] - rgb2[1])*.59 + abs(rgb1[2] - rgb2[2])*.11)\r\n return out\r\n\r\n\r\n\r\n\r\ndef compare(rgb, deflist, outlist):\r\n paint = deflist[0]\r\n dif = 1000000\r\n for p in deflist:\r\n if compareColor(rgb,p) < dif:\r\n dif = compareColor(rgb,p)\r\n paint = p\r\n #print(\"closest to\")\r\n for p in outlist:\r\n if (p[0] == paint[0] and p[1] == paint[1] and p[2] == paint[2]):\r\n return False\r\n outlist.append(paint)\r\n #print(paint)\r\n\r\ndef closest(rgb, paints): #gets the closest paint in list to the rgb value\r\n paint = paints[0]\r\n dif = 10000\r\n for p in paints:\r\n if compareColor(rgb,p) < dif:\r\n dif = compareColor(rgb,p)\r\n paint = p\r\n return paint\r\n\r\ndef repaint(filename, paints):\r\n im = Image.open(filename)\r\n w, h = im.size\r\n for x in range(0, w-1):\r\n for y in range(0,h-1):\r\n newPix = closest(im.getpixel((x,y)),paints)\r\n im.putpixel((x,y), newPix)\r\n im.save(\"result.jpg\")\r\n\r\n\r\n\r\n#this one is for dealing with a reference to image by path\r\ndef main(filename, numpaints, database):\r\n deflist = [(250, 250, 250), (200, 200, 200), (150, 150, 150), (100, 100, 100), (50, 50, 50), (0, 0, 0)]\r\n outlist = []\r\n maxcolors = 15\r\n\r\n if numpaints >= 5 & numpaints <=35:\r\n maxcolors = numpaints\r\n if database!=None:\r\n deflist = database\r\n else:\r\n print(\"no list\")\r\n deflist = [(250, 250, 250), (200, 200, 200), (150, 150, 150), (100, 100, 100), (50, 50, 50), (0, 0, 0)]\r\n if filename != None:\r\n im = Image.open(filename)\r\n else:\r\n im = Image.open(filename)\r\n list = im.getcolors(im.size[0] * im.size[1])\r\n list.sort(reverse=True)\r\n j = 0\r\n #print (len(database))\r\n while (j < maxcolors and j < len(list)):\r\n #print(list[j][0])\r\n #print(list[j][1])\r\n #compare(list[j][1],deflist,outlist)\r\n if (compare(list[j][1], deflist, outlist) == False):\r\n maxcolors += 1\r\n j+= 1\r\n #print(j)\r\n\r\n# hold = outlist.copy()\r\n# hold = outlist[:]\r\n # outlist.clear()\r\n #print(len(outlist))\r\n return outlist\r\n\r\n#this one is for dealing with the image itself\r\ndef mainproper(file, numpaints, database):\r\n deflist = [(250, 250, 250), (200, 200, 200), (150, 150, 150), (100, 100, 100), (50, 50, 50), (0, 0, 0)]\r\n outlist = []\r\n maxcolors = 15\r\n\r\n if numpaints >= 5 & numpaints <=35:\r\n maxcolors = numpaints\r\n if database!=None:\r\n deflist = database\r\n if file != None:\r\n im = Image.open(io.BytesIO(file))\r\n else:\r\n return\r\n list = im.getcolors(im.size[0] * im.size[1])\r\n list.sort(reverse=True)\r\n j = 0\r\n print(\"here\")\r\n\r\n while (j < maxcolors and j < len(list)):\r\n # print(list[j][0])\r\n # print(list[j][1])\r\n # compare(list[j][1],deflist,outlist)\r\n if (compare(list[j][1], deflist, outlist) == False):\r\n maxcolors += 1\r\n j += 1\r\n 
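# note: when compare() reports that the closest paint was already picked (it\r\n    # returns False), maxcolors is bumped so the scan continues further down the\r\n    # sorted colour-frequency list until enough distinct paints are collected\r\n    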
# print(j)\r\n\r\n    #hold = outlist.copy()\r\n    #outlist.clear()\r\n    #print(len(hold))\r\n    return outlist\r\n\r\n#main(\"test.jpg\",15, None)","repo_name":"wash13/Paint-Analyzer","sub_path":"Analyzer.py","file_name":"Analyzer.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"2869906939","text":"import machine, onewire, ds18x20, time\nfrom machine import Pin, ADC, Timer # Pin, ADC and Timer are used unqualified below\n\nsensor_pin = machine.Pin(4)\npot = ADC(Pin(34))\nrole = Pin(26, Pin.OUT)\ndisp_1 = Pin(16,Pin.OUT,value = 0)\ndisp_2 = Pin(17,Pin.OUT,value = 0)\nrole.value(1) # relay off\n\n\ndisplaylist = [15,11,14,13,12,18,19]\ndisplayled = []\nfor seg in displaylist:\n    displayled.append(Pin(seg, Pin.OUT))\n\narrSeg = [[1,1,1,1,1,1,0],\n          [0,1,1,0,0,0,0],\n          [1,1,0,1,1,0,1],\n          [1,1,1,1,0,0,1],\n          [0,1,1,0,0,1,1],\n          [1,0,1,1,0,1,1],\n          [1,0,1,1,1,1,1],\n          [1,1,1,0,0,0,0],\n          [1,1,1,1,1,1,1],\n          [1,1,1,1,0,1,1]]\n\n\npot.atten(ADC.ATTN_11DB)\n\nsensor = ds18x20.DS18X20(onewire.OneWire(sensor_pin))\nrom = sensor.scan()[0] # scan() returns a list of device ROMs; use the first probe\n\ntim0 = Timer(0)\ndef Dongu(tim0):\n    sensor.convert_temp()\n    sicaklik = sensor.read_temp(rom)\n    istenenSicaklik = pot.read()\n    istenenSicaklik = map(istenenSicaklik, 0, 1023, 0, 99) # keep the mapped value; it was being discarded\n    DispYaz(istenenSicaklik)\n    if istenenSicaklik >= (sicaklik-1):\n        role.value(1) # turn the relay off\n    else:\n        role.value(0) # turn the relay on\n\ndef map(x, i_m, i_M, o_m, o_M): # map the 0-1023 potentiometer reading onto 0-99\n    return max(min(o_M, (x - i_m) * (o_M - o_m) // (i_M - i_m) + o_m), o_m)\n \ndef DispYaz(sayi):\n    global rakam1, rakam2\n    rakam1 = (sayi//10)%10\n    rakam2 = sayi%10\n    if sayi < 10:\n        rakam1 = 0\n        disp_1.on()\n        for j in range(7):\n            displayled[j].value(arrSeg[rakam1][j])\n        time.sleep_ms(5)\n        disp_1.off()\n        disp_2.on()\n        for k in range(7):\n            displayled[k].value(arrSeg[rakam2][k])\n        time.sleep_ms(5) \n        disp_2.off() \n    else:\n        disp_1.on()\n        for j in range(7):\n            displayled[j].value(arrSeg[rakam1][j])\n        time.sleep_ms(5)\n        disp_1.off()\n        disp_2.on()\n        for k in range(7):\n            displayled[k].value(arrSeg[rakam2][k])\n        time.sleep_ms(5) \n        disp_2.off() \n    \n    \n    \ntim0.init(period=500, mode=Timer.PERIODIC, callback=Dongu)\n","repo_name":"barannayir/sousvideWithMicropython","sub_path":"sousvide.py","file_name":"sousvide.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"9504388437","text":"# Author: Nasir Lawal\n# Date: 17th-08-2019\n\n\"\"\" Description\nHardness must be greater than 50.\nCarbon content must be less than 0.7.\nTensile strength must be greater than 5600.\n\nThe grades are as follows:\nGrade is 10 if all three conditions are met.\nGrade is 9 if conditions (i) and (ii) are met.\nGrade is 8 if conditions (ii) and (iii) are met.\nGrade is 7 if conditions (i) and (iii) are met.\nGrade is 6 if only one condition is met.\nGrade is 5 if none of three conditions are met.\n\nThe first line contains an integer T, \ntotal number of testcases. 
Then follow T lines,\neach line contains three numbers hardness, \ncarbon content and tensile strength of the steel.\n\"\"\"\n\ndef main():\n\tuser_input = int(input())\n\tfor n in range(user_input):\n\t\tnum_string = input().strip().split()\n\t\thardness = int(num_string[0])\n\t\tcarbon = float(num_string[1])\n\t\ttensile = int(num_string[2])\n\t\tif hardness > 50 and carbon < 0.7 and tensile > 5600:\n\t\t\tprint(\"10\")\n\t\telif hardness > 50 and carbon < 0.7 and tensile <= 5600:\n\t\t\tprint(\"9\")\n\t\telif hardness <= 50 and carbon < 0.7 and tensile > 5600:\n\t\t\tprint(\"8\")\n\t\telif hardness > 50 and carbon >= 0.7 and tensile > 5600:\n\t\t\tprint(\"7\")\n\t\telif hardness > 50 or carbon < 0.7 or tensile > 5600:\n\t\t\tprint(\"6\")\n\t\telif hardness <= 50 and carbon >= 0.7 and tensile <= 5600:\n\t\t\tprint(\"5\")\nif __name__ == \"__main__\":\n\tmain()","repo_name":"nasir-001/CodeCheff","sub_path":"GradeTheSteel.py","file_name":"GradeTheSteel.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"6047897148","text":"\"\"\"\r\nCreated on Sat. May, 6th, 2017\r\n\r\n@author: Lige Tan\r\n\r\n\"\"\"\r\n\r\nfrom spectral_cube import SpectralCube\r\nimport astropy.units as u\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef main():\r\n    #\r\n    trans = Translation()\r\n    trans.arrangement()\r\n\r\n\r\nclass Translation:\r\n    \r\n    def __init__(self):\r\n        # Initialize a Translation task of spectra\r\n        # - self is the Translation to initialize\r\n        \r\n        self.offset_value = 3\r\n        self.date = 528 ## M/DD\r\n        self.start = 92 ## start number of .fits file\r\n        self.end = 94 ## end number of .fits file\r\n        \r\n    def arrangement(self):\r\n        \r\n        for i in range(self.start,self.end+1):\r\n            names = ['','','']\r\n            for j in range(1,4):\r\n                string1 = '/mnt/work/ltan/data/jcmth20140'\r\n                string2 = '_000'\r\n                string3 = '_0'\r\n                string4 = '_reduced001_nit_000.fits'\r\n                names[j-1] = string1 + str(self.date) + string2 + str(i) + string3 + str(j) + string4\r\n            plt.figure(i-self.start+1)\r\n            self.doppler_conversion(names[0], names[1], names[2], i)\r\n        \r\n    def doppler_conversion(self, file_name1, file_name2, file_name3, file_number):\r\n        \r\n        spectrum1 = SpectralCube.read(file_name1)\r\n        spectrum1_fq_converted = spectrum1.with_spectral_unit(u.km/u.s, velocity_convention='radio')\r\n        spectrum1_intensity = spectrum1.unmasked_data[:,0,0]\r\n        plt.plot(spectrum1_fq_converted.spectral_axis, spectrum1_intensity)\r\n        plt.hold(True)\r\n        \r\n        spectrum2 = SpectralCube.read(file_name2)\r\n        spectrum2_fq_converted = spectrum2.with_spectral_unit(u.km/u.s, velocity_convention='radio')\r\n        spectrum2_intensity = spectrum2.unmasked_data[:,0,0]\r\n        length = len(spectrum2_intensity)\r\n        offset = np.ones((length)) ## for m X n matrix: np.ones((length,1)), a tuple inside\r\n        offset.fill(self.offset_value)\r\n        spectrum2_intensity = np.add(spectrum2_intensity / u.Kelvin, offset)\r\n        plt.plot(spectrum2_fq_converted.spectral_axis, spectrum2_intensity*u.Kelvin)\r\n        plt.hold(True)\r\n        \r\n        spectrum3 = SpectralCube.read(file_name3)\r\n        spectrum3_fq_converted = spectrum3.with_spectral_unit(u.km/u.s, velocity_convention='radio')\r\n        spectrum3_intensity = spectrum3.unmasked_data[:,0,0]\r\n        length = len(spectrum3_intensity)\r\n        offset = np.ones((length)) ## for m X n matrix: np.ones((length,1)), a tuple inside\r\n        offset.fill(2*self.offset_value)\r\n        spectrum3_intensity = np.add(spectrum3_intensity / u.Kelvin, offset)\r\n        
plt.plot(spectrum3_fq_converted.spectral_axis, spectrum3_intensity)\r\n        plt.hold(False)\r\n        \r\n        #print(spectrum1_fq_converted.spectral_axis[0])\r\n        #print(spectrum1_intensity[0])\r\n        #print(spectrum1[0])\r\n        \r\n        plt.xlim([spectrum1_fq_converted.spectral_axis[len(spectrum1)-1] * u.s / u.km, spectrum1_fq_converted.spectral_axis[0] * u.s / u.km])\r\n        plt.xlabel(r'$\\ V_{LSR} $ (km/s)',fontsize = 12)\r\n        plt.ylabel(r'$\\ T^\\ast_A $',fontsize = 12)\r\n        plt.title('20140'+str(self.date)+': 000'+str(file_number), fontsize = 16)\r\n        lgd = plt.legend(['the 1st frequency range','the 2nd frequency range','the 3rd frequency range'], bbox_to_anchor=(1,1))\r\n        plt.savefig('20140'+str(self.date)+'_000'+str(file_number)+'.png', bbox_extra_artists=(lgd,), bbox_inches='tight')\r\n\r\n    \r\n    def export_spectra(self):\r\n        pass\r\n\r\n\r\n\r\nmain()\r\n","repo_name":"ltanGit/Test-Python","sub_path":"Test-Python/spectrum_reader.py","file_name":"spectrum_reader.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"11776946087","text":"def solution(want, number, discount):\n    answer = 0\n    dict_fruit = {}\n    zero_list = [0 for i in range(len(want))]\n    \n    for i in range(len(want)):\n        dict_fruit[want[i]] = number[i]\n    \n    # first, tally the first ten days of discounts\n    for i in range(10):\n        if discount[i] in list(dict_fruit.keys()):\n            dict_fruit[discount[i]] -= 1\n    \n    # the elements of number sum to 10, so every count must hit exactly zero to match zero_list\n    if list(dict_fruit.values()) == zero_list:\n        answer += 1\n    if len(discount) == 10:\n        return answer\n    \n    # slide the window one day at a time, restoring the item that leaves and subtracting the one that enters\n    for i in range(len(discount) - 10):\n        if discount[i] in list(dict_fruit.keys()):\n            dict_fruit[discount[i]] += 1\n        if discount[i + 10] in list(dict_fruit.keys()):\n            dict_fruit[discount[i + 10]] -= 1\n        if list(dict_fruit.values()) == zero_list:\n            answer += 1\n    \n    return answer","repo_name":"yongdori00/coding-test","sub_path":"프로그래머스/lv2/131127. 
할인 행사/할인 행사.py","file_name":"할인 행사.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"15702132220","text":"import sys\n\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\npoints_arr = []\ntime_arr = []\ntotal = 0\n\nfor i in range(N):\n points, time = map(int, input().split())\n points_arr.append(points)\n time_arr.append(time)\n\n\ndef dfs(level, current, limit):\n global total\n\n if limit > M:\n return\n\n if level == N:\n if current > total:\n total = current\n\n else:\n dfs(level + 1, current + points_arr[level], limit + time_arr[level])\n dfs(level + 1, current, limit)\n\n\ndfs(0, 0, 0)\nprint(total)\n","repo_name":"lbu0413/problems_to_solve","sub_path":"ps_with_python/section7/first_attempt/최대점수구하기.py","file_name":"최대점수구하기.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"21254386452","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.array([80,85,90,95,100,105,110,115,120,125])\ny = np.array([240,250,260,270,280,290,300,310,320,330])\n\nplt.title(\"Jam Olahraga\")\nplt.xlabel(\"Detak Jantung\")\nplt.ylabel(\"Kalori Terbakar\")\n\nplt.plot(x,y)\nplt.grid(color='green', linestyle='--', linewidth='0.5')\nplt.show()\n","repo_name":"Kyotazel/python_LKS","sub_path":"matplotlib-numpy/06-Grid.py","file_name":"06-Grid.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"14324448797","text":"\"\"\"\nProcess an image file using tesseract.\n\"\"\"\nimport os\n\nfrom .utils import ShellParser\n\n\nclass Parser(ShellParser):\n \"\"\"Extract text from various image file formats using tesseract-ocr\"\"\"\n\n def extract(self, filename, **kwargs):\n\n # if language given as argument, specify language for tesseract to use\n if 'language' in kwargs:\n args = ['tesseract', filename, 'stdout', '-l', kwargs['language']]\n else:\n args = ['tesseract', filename, 'stdout']\n\n stdout, _ = self.run(args)\n return stdout\n","repo_name":"deanmalmgren/textract","sub_path":"textract/parsers/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":3657,"dataset":"github-code","pt":"9"} +{"seq_id":"19833463811","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nr\"\"\"Provide the task solver that uses quadratic programming.\n\nA quadratic program (QP) is written in standard form [1]_ as:\n\n.. math::\n\n x^* =& \\arg \\min_x \\; \\frac{1}{2} x^T Q x + p^T x \\\\\n & \\text{subj. to } \\; \\begin{array}{c} Gx \\leq h \\\\ Fx = k \\end{array}\n\n\nwhere :math:`x` is the vector being optimized (in robotics, it can be joint positions, velocities, torques, ...),\n\"the matrix :math:`Q` and vector :math:`p` are used to define any quadratic objective function of these variables,\nwhile the matrix-vector couples :math:`(G,h)` and :math:`(F,k)` respectively define inequality and equality\nconstraints\" [1]_. Inequality constraints can include the lower bounds and upper bounds of :math:`x` by setting\n:math:`G` to be the identity matrix or minus this one, and :math:`h` to be the upper or minus the lower bounds.\n\nFor instance, the quadratic objective function :math:`||Ax - b||_{W}^2` (where :math:`W` is a symmetric weight matrix)\nis given in the standard form as:\n\n.. 
math:: ||Ax - b||_{W}^2 = (Ax - b)^\\top W (Ax - b) = x^\\top A^\\top W A x - 2 b^\\top W A x + b^\\top W b\n\nwhere the last term :math:`b^\\top W b` can be removed as it does not depend on the variables we are optimizing (i.e.\n:math:`x`). We thus have :math:`Q = A^\\top W A` a symmetric matrix and :math:`p = -2 A^\\top W b`.\n\nNote that if we had instead :math:`||Ax - b||_{W}^2 + c^\\top x`, this could be rewritten as:\n\n.. math:: ||Ax - b||_{W}^2 + c^\\top x = x^\\top A^\\top W A x - (2 b^\\top W A - c^\\top) x + b^\\top W b,\n\ngiving :math:`Q = A^\\top W A` and :math:`p = (c - 2 A^\\top W b)`.\n\nMany control problems in robotics can be formulated as a quadratic programming problem. For instance, let's assume\nthat we want to optimize the joint velocities :math:`\\dot{q}` given the end-effector's desired position and velocity\nin task space. We can define the quadratic problem as:\n\n.. math:: || J(q) \\dot{q} - v_c ||^2\n\nwhere :math:`v_c = K_p (x_d - x) + K_d (v_d - \\dot{x})` (using PD control), with :math:`x_d` and :math:`x` the desired\nand current end-effector's position respectively, and :math:`v_d` is the desired velocity. The solution to this\ntask (i.e. optimization problem) is the same solution given by `inverse kinematics`. Now, you can even obtain the\ndamped least squares inverse kinematics by adding a soft task such that\n:math:`||J(q)\\dot{q} - v_c||^2 + ||q||^2` is optimized (note that :math:`||q||^2 = ||A q - b||^2`, where :math:`A=I` is\nthe identity matrix and :math:`b=0` is the zero/null vector).\n\n\n- **Soft** priority tasks: with soft-priority tasks, the quadratic programming problem being minimized for :math:`n`\n such tasks is given by:\n\n .. math::\n\n \\begin{array}{c}\n x^* = \\arg \\min_x ||A_1 x - b_1||_{W_1}^2 + ||A_2 x - b_2 ||_{W_2}^2 + ... + ||A_n x - b_n ||_{W_n}^2 \\\\\n \\text{subj. to } \\; \\begin{array}{c} Gx \\leq h \\\\ Fx = k \\end{array}\n \\end{array}\n\n Often, the weight PSD matrices :math:`W_i` are just positive scalars :math:`w_i`. This problem can notably be solved\n by stacking the :math:`A_i` one of top of another, and stacking the :math:`b_i` and :math:`W_i` in the same manner,\n and solving :math:`||A x - b||_{W}^2`. This is known as the augmented task. When the matrices :math:`A_i` are\n Jacobians this is known as the augmented Jacobian (which can sometimes be ill-conditioned).\n\n- **Hard** priority tasks: with hard-priority tasks, the quadratic programming problem for :math:`n` tasks is defined\n in a sequential manner, where the first most important task will be first optimized, and then the subsequent tasks\n will be optimized one after the other. Thus, the first task to be optimized is given by:\n\n .. math::\n\n x_1^* =& \\arg \\min_x \\; ||A_1 x - b_1||^2 \\\\\n & \\text{subj. to } \\; \\begin{array}{c} G_1 x \\leq h_1 \\\\ F_1 x = k_1 \\end{array}\n\n while the second next most important task that would be solved is given by:\n\n .. math::\n\n x_2^* =& \\arg \\min_x \\; ||A_2 x - b_2||^2 \\\\\n & \\begin{array}{cc} \\text{subj. to }\n & G_2 x \\leq h_2 \\\\\n & F_2 x = k_2 \\\\\n & A_1 x = A_1 x_1^* \\\\\n & G_1 x \\leq h_1 \\\\\n & F_1 x = k_1,\n \\end{array}\n\n until the :math:`n` most important task, given by:\n\n .. math::\n\n x_n^* =& \\arg \\min_x \\; ||A_n x - b_n||^2 \\\\\n & \\begin{array}{cc} \\text{subj. to } & A_1 x = A_1 x_1^* \\\\\n & ... \\\\\n & A_{n-1} x = A_{n-1} x_{n-1}^* \\\\\n & G_1 x \\leq h_1 \\\\\n & ... \\\\\n & G_n x \\leq h_n \\\\\n & F_1 x = k_1 \\\\\n & ... \\\\\n & F_n x = k_n. 
\\end{array}\n\n By setting the previous :math:`A_{i-1} x = A_{i-1} x_{i-1}^*` as equality constraints, the current solution\n :math:`x_i^*` won't change the optimality of all higher priority tasks.\n\nReferences:\n - [1] \"Quadratic Programming in Python\" (https://scaron.info/blog/quadratic-programming-in-python.html), Caron, 2017\n - [2] \"OpenSoT: A whole-body control library for the compliant humanoid robot COMAN\", Rocchi et al., 2015\n - [3] \"Robot Control for Dummies: Insights and Examples using OpenSoT\", Hoffman et al., 2017\n\"\"\"\n\nimport numpy as np\n\nfrom pyrobolearn.priorities.solvers.task_solver import TaskSolver\nfrom pyrobolearn.optimizers.qpsolvers_optimizer import QP\n\n\n__author__ = \"Brian Delhaisse\"\n__copyright__ = \"Copyright 2019, PyRoboLearn\"\n__credits__ = [\"OpenSoT (Enrico Mingo Hoffman and Alessio Rocchi, C++)\", \"Brian Delhaisse (Python + doc)\"]\n__license__ = \"GNU GPLv3\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Brian Delhaisse\"\n__email__ = \"briandelhaisse@gmail.com\"\n__status__ = \"Development\"\n\n\nclass QPTaskSolver(TaskSolver):\n r\"\"\"QP Task Solver.\n\n The QP task solver uses QP to solve a task or stack of tasks.\n \"\"\"\n\n def __init__(self, task, method='quadprog', epsilon=1.e-8):\n \"\"\"\n Initialize the task solver.\n\n Args:\n task (Task): Priority tasks.\n method (str): QP method/library to use. Select between ['cvxopt', 'cvxpy', 'ecos', 'gurobi', 'mosek',\n 'osqp', 'qpoases', 'quadprog']\n epsilon (float): this small amount is added to the diagonal elements of the quadratic matrix such that it\n is positive definite.\n \"\"\"\n solver = QP(method=method)\n super(QPTaskSolver, self).__init__(task, solver)\n self.epsilon = epsilon\n\n ##############\n # Properties #\n ##############\n\n @property\n def solver(self):\n \"\"\"Return the optimizer/solver instance.\"\"\"\n return self._solver\n\n @solver.setter\n def solver(self, solver):\n \"\"\"Set the optimizer/solver instance.\"\"\"\n if solver is not None and not isinstance(solver, QP):\n raise TypeError(\"Expecting the given 'solver' to be an instance of `QP`, instead got: \"\n \"{}\".format(type(solver)))\n self._solver = solver\n\n ###########\n # Methods #\n ###########\n\n def update(self):\n \"\"\"Update the priority task; compute the matrices and vectors to be used later in the `solve` method.\"\"\"\n self.task.update()\n\n def solve(self, x0=None, update=False):\n \"\"\"Solve the priority task.\n\n Args:\n x0 (np.array[float[N]], None): initial guess for the optimized variables.\n\n Returns:\n np.array[float[N]]: the optimized variables.\n \"\"\"\n # update if necessary\n if update:\n self.update()\n\n # get task objectives and constraints\n # objectives\n As = self.task.A\n Qs = self.task.Q\n ps = self.task.p\n # constraints\n Gs = self.task.G\n hs = self.task.h\n Fs = self.task.F\n ks = self.task.k\n\n # if not a stack of tasks, just transform the task objectives/constraints into lists\n if not self.task.is_stack_of_tasks():\n As, Qs, ps = [As], [Qs], [ps]\n\n # solve\n x_opt, x_opts, x_projs = x0, [], []\n for i in range(len(As)):\n # objectives\n Q = Qs[i]\n p = ps[i]\n\n # constraints\n G = Gs[:i+1]\n G = np.concatenate(G) if len(G) > 0 else None\n h = hs[:i+1]\n h = np.concatenate(h) if len(h) > 0 else None\n F = Fs[:i+1] + As[:i]\n F = np.concatenate(F) if len(F) > 0 else None\n k = ks[:i+1] + x_projs[:i]\n k = np.concatenate(k) if len(k) > 0 else None\n\n # make sure that Q is PD\n diag_Q = np.einsum('ii->i', Q)\n diag_Q += self.epsilon\n # 
print(\"evals(Q): \", np.linalg.eigvals(Q))\n\n # print(\"Q: \", Q.shape)\n # print(\"p: \", p.shape)\n # print(\"G: \", G.shape)\n # print(\"h: \", h.shape)\n # print(\"F: \", F.shape)\n # print(\"k: \", k.shape)\n\n # solve (by starting from previous optimized solution)\n x_opt = self.solver.optimize(Q=Q, p=p, x0=x_opt, G=G, h=h, A=F, b=k)\n x_opts.append(x_opt)\n # print(\"Loss: {}\".format(self.task.loss(x_opt)))\n\n # project best solution (will be used later in the stack for constraints)\n if i < len(As) - 1:\n x_proj = As[i].dot(x_opt)\n x_projs.append(x_proj)\n\n # return optimized variables\n return x_opt\n","repo_name":"robotlearn/pyrobolearn","sub_path":"pyrobolearn/priorities/solvers/qp_task_solver.py","file_name":"qp_task_solver.py","file_ext":"py","file_size_in_byte":9420,"program_lang":"python","lang":"en","doc_type":"code","stars":386,"dataset":"github-code","pt":"9"} +{"seq_id":"3076237927","text":"import pytz\n\nfrom test.support import fixtures\nfixtures.global_setup()\n\nimport unittest\n\nfrom support.testutils import open_doc, open_json\nfrom support.object_checks import has_keys, is_url, is_li_compatible_dict\nfrom resources.lib import parsex\nfrom resources.lib import errors\n\nsetUpModule = fixtures.setup_local_tests\ntearDownModule = fixtures.tear_down_local_tests\n\n\nclass TestScrapeJson(unittest.TestCase):\n def test_scrape_json_watch_pages(self):\n for page in ('html/index.html', 'html/watch-itv1.html'):\n get_page = open_doc(page)\n data = parsex.scrape_json(get_page())\n self.assertIsInstance(data, dict)\n\n def test_invalid_page(self):\n # no __NEXT_DATA___\n self.assertRaises(errors.ParseError, parsex.scrape_json, '{data=[1,2]}')\n\n\nclass Generic(unittest.TestCase):\n def test_build_url(self):\n url = parsex.build_url('Astrid and Lily Save the World', '10a2921')\n self.assertEqual('https://www.itv.com/watch/astrid-and-lily-save-the-world/10a2921', url)\n url = parsex.build_url('Astrid and Lily Save the World', '10a2921', None)\n self.assertEqual('https://www.itv.com/watch/astrid-and-lily-save-the-world/10a2921', url)\n url = parsex.build_url('Astrid and Lily Save the World', '10a2921', '10a2921a0001')\n self.assertEqual('https://www.itv.com/watch/astrid-and-lily-save-the-world/10a2921/10a2921a0001', url)\n url = parsex.build_url('Astrid & Lily Save the World', '10a2921', '10a2921a0001')\n self.assertEqual('https://www.itv.com/watch/astrid-and-lily-save-the-world/10a2921/10a2921a0001', url)\n url = parsex.build_url('#50/50-heroes?', '10a1511')\n self.assertEqual('https://www.itv.com/watch/5050-heroes/10a1511', url)\n url = parsex.build_url('Paul Sinha: Shout Out To My Ex', '10a3819')\n self.assertEqual('https://www.itv.com/watch/paul-sinha-shout-out-to-my-ex/10a3819', url)\n url = parsex.build_url(\"Watch Thursday's ITV Evening News\", '10a3819')\n self.assertEqual('https://www.itv.com/watch/watch-thursdays-itv-evening-news/10a3819', url)\n\n def test_sort_title(self):\n self.assertEqual('my title', parsex.sort_title('My Title'))\n self.assertEqual('title', parsex.sort_title('The Title'))\n self.assertEqual('thetitle', parsex.sort_title('TheTitle'))\n\n def test_parse_hero(self):\n data = open_json('html/index-data.json')\n for item_data in data['heroContent']:\n obj = parsex.parse_hero_content(item_data)\n has_keys(obj, 'type', 'show')\n is_li_compatible_dict(self, obj['show'])\n # An item of unknown type\n item = data['heroContent'][0]\n item['contentType'] = 'some new type'\n self.assertIsNone(parsex.parse_hero_content(item))\n # Invalid item\n item = 
{'contentType': 'special', 'title': False}\n self.assertIsNone(parsex.parse_hero_content(item))\n\n def test_parse_slider(self):\n data = open_json('html/index-data.json')\n for item_name, item_data in data['editorialSliders'].items():\n obj = parsex.parse_slider(item_name, item_data)\n has_keys(obj, 'type', 'show')\n is_li_compatible_dict(self, obj['show'])\n\n def test_parse_collection_title(self):\n data = open_json('html/collection_just-in_data.json')['collection']['shows']\n # film - Valentine's Day\n item = parsex.parse_collection_item(data[2])\n has_keys(item, 'playable', 'show')\n is_li_compatible_dict(self, item['show'])\n self.assertIs(item['playable'], True)\n # series - The Twelve\n item = parsex.parse_collection_item(data[1])\n has_keys(item, 'playable', 'show')\n is_li_compatible_dict(self, item['show'])\n self.assertIs(item['playable'], False)\n # episode - Kavos Weekender\n item = parsex.parse_collection_item(data[3])\n has_keys(item, 'playable', 'show')\n is_li_compatible_dict(self, item['show'])\n self.assertIs(item['playable'], True)\n # Brand - Jonathan Ross' Must-Watch Films\n item = parsex.parse_collection_item(data[13])\n has_keys(item, 'playable', 'show')\n is_li_compatible_dict(self, item['show'])\n self.assertIs(item['playable'], False)\n # An invalid item\n item = parsex.parse_collection_item({})\n self.assertIsNone(item)\n\n def test_parse_collection_title_from_main_page(self):\n data = open_json('html/index-data.json')['editorialSliders']['editorialRailSlot1']['collection']['shows']\n item = parsex.parse_collection_item(data[0])\n has_keys(item, 'playable', 'show')\n is_li_compatible_dict(self, item['show'])\n\n def test_parse_news_collection_item(self):\n data = open_json('html/index-data.json')['shortFormSliderContent'][0]['items']\n tz_uk = pytz.timezone('Europe/London')\n # a short new item\n item = parsex.parse_news_collection_item(data[1], tz_uk, \"%H-%M-%S\")\n has_keys(item, 'playable', 'show')\n is_li_compatible_dict(self, item['show'])\n # NOTE: As of 20-7-23 all news collection item appear to have the same structure\n # Just need to test a bit longer to be sure.\n # a new item like a normal catchup episode\n # item = parsex.parse_news_collection_item(data[-1], tz_uk, \"%H-%M-%S\")\n # has_keys(item, 'playable', 'show')\n # is_li_compatible_dict(self, item['show'])\n\n # An invalid item\n item = parsex.parse_news_collection_item({}, None, None)\n self.assertIsNone(item)\n\n\n def test_parse_trending_collection_item(self):\n data = open_json('html/index-data.json')['trendingSliderContent']['items']\n item = parsex.parse_trending_collection_item(data[1])\n has_keys(item, 'playable', 'show')\n is_li_compatible_dict(self, item['show'])\n # An invalid item\n item = parsex.parse_trending_collection_item({})\n self.assertIsNone(item)\n\n def test_parse_episode_title(self):\n data = open_json('html/series_miss-marple_data.json')\n item = parsex.parse_episode_title(data['seriesList'][0]['titles'][0])\n is_li_compatible_dict(self, item)\n\n # Episodes where field episodeTitle = None\n data = open_json('html/series_bad-girls_data.json')\n title_obj = data['seriesList'][6]['titles'][0]\n item = parsex.parse_episode_title(title_obj)\n is_li_compatible_dict(self, item)\n\n # Episode where field seriesNumber is not a number, but 'other episodes'.\n data = open_json('html/series_midsummer-murders.json')\n series = data['seriesList'][-1]\n self.assertEqual('Other Episodes', series['seriesLabel'])\n title_obj = series['titles'][0]\n item = 
parsex.parse_episode_title(title_obj)\n        is_li_compatible_dict(self, item)\n\n        # Paid episode\n        title_obj['premium'] = True\n        item = parsex.parse_episode_title(title_obj)\n        is_li_compatible_dict(self, item)\n        self.assertTrue('premium' in item['info']['plot'].lower())\n\n    def test_parse_search_result(self):\n        # These files contain programmes, episodes, films and specials, both with and without a specialProgramm field.\n        for file in ('search/search_results_mear.json', 'search/search_monday.json'):\n            data = open_json(file)\n            for result_item in data['results']:\n                item = parsex.parse_search_result(result_item)\n                has_keys(item, 'playable', 'show')\n                is_li_compatible_dict(self, item['show'])\n\n        # unknown entity type\n        search_result = data['results'][0]\n        search_result['entityType'] = 'dfgs'\n        self.assertIsNone(parsex.parse_search_result(search_result))","repo_name":"dimkroon/itvx-for-kodi","sub_path":"test/local/test_parsex.py","file_name":"test_parsex.py","file_ext":"py","file_size_in_byte":7974,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"9"}
{"seq_id":"12923760337","text":"import pandas as pd\n\nurl = 'https://vincentarelbundock.github.io/Rdatasets/csv/robustbase/ambientNOxCH.csv'\ndf_sample = pd.read_csv(url, parse_dates=1, index_col=1)\n\n# prepare df\ndf = df_sample.iloc[:, 1:]\n\n# prepare df_monthly\ndf_monthly = df.copy()\ndf_monthly.index = df_monthly.index.map(lambda x: x.month)\ndf_monthly = df_monthly.groupby(level=0).sum()","repo_name":"mura5726/study_pydata","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"13828595637","text":"from tensorflow.keras.models import load_model\nfrom tensorflow.keras.applications.efficientnet import preprocess_input as effnet_preprocess_input\nfrom tensorflow.keras.applications.xception import preprocess_input as xception_preprocess_input\nfrom tensorflow.keras.applications.nasnet import preprocess_input as nasnet_preprocess_input\nfrom flask import Flask, request, jsonify\nfrom PIL import Image\nimport io\nimport os\nimport numpy as np\n\nMODEL_PATH = os.path.join('static', 'trained_model')\n\napp: Flask = Flask(__name__)\napp.config['UPLOAD_MODEL'] = MODEL_PATH\n\nthreshold = 0.5\nmodel_list = {'effnet': {'filename': 'EffNet.h5', 'image_size': 512},\n              'xception': {'filename': 'Xception.h5', 'image_size': 512},\n              'nasnetmobile': {'filename': 'NasNetMobile.h5', 'image_size': 224},\n              }\n\n\ndef preprocess(image, size):\n    img = image.read()\n    img = Image.open(io.BytesIO(img))\n    img = img.convert(\"RGB\")\n    img = img.resize((size, size))\n    img = np.array(img)\n    return np.expand_dims(img, axis=0)\n\n\ndef let_predict(model_name, image):\n    if model_name == 'effnet':\n        img = effnet_preprocess_input(image)\n        return model_effnet.predict(img)\n    elif model_name == 'xception':\n        img = xception_preprocess_input(image)\n        return model_xception.predict(img)\n    elif model_name == 'nasnetmobile':\n        img = nasnet_preprocess_input(image)\n        return model_nasnetmobile.predict(img)\n\n\ndef check_class(result):\n    if result[0][0] < threshold:\n        prob = 100-result[0][0]*100\n        return 'defect', prob\n    else:\n        return 'Ok', result[0][0]*100\n\n\nmodel_effnet = load_model(os.path.join(\n    MODEL_PATH, model_list['effnet']['filename']))\nprint('model_effnet uploaded.')\n\nmodel_xception = load_model(os.path.join(\n    MODEL_PATH, model_list['xception']['filename']))\nprint('model_xception 
uploaded.')\n\nmodel_nasnetmobile = load_model(os.path.join(\n    MODEL_PATH, model_list['nasnetmobile']['filename']))\nprint('model_nasnetmobile uploaded.')\n\n\n@app.route('/healthcheck', methods=['GET'])\ndef healthcheck():\n    return \"This server is healthy\"\n\n\n@app.route('/predict/<model_name>', methods=['POST'])\ndef predict(model_name):\n    if model_name.lower() in model_list:\n        model_name = model_name.lower()\n        predicted = []\n        for image in request.files.getlist(\"image\"):\n            filename = image.filename\n            img = preprocess(image,\n                             model_list[model_name][\"image_size\"])\n            result = let_predict(model_name, img)\n            predict, prob = check_class(result)\n            predicted.append(\n                {filename: {\"predict\": predict, \"probability\": prob}})\n            print(f\"{filename} : predicted as {predict} with probability {prob}%\")\n        return jsonify(predicted)\n    else:\n        return \"This model is not available.\"\n\n\ndef main():\n    app.run(host=\"0.0.0.0\", port=\"5000\", debug=True)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"pswpung/cast_defection","sub_path":"cast_defection_project/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"11867951561","text":"import datetime\nimport faulthandler\nimport unittest\n\nfaulthandler.enable()  # to debug seg faults and timeouts\n\nimport cf\n\n# Note: it is important we test on the cf logging config rather than the\n# generic Python module logging (i.e. 'cf.logging' not just 'logging').\n# Also, mimic the use in the codebase by using a module-specific logger:\nlog_name = __name__\nlogger = cf.logging.getLogger(log_name)\n\n\nclass dummyClass:\n    \"\"\"Dummy class acting as container to test methods as proper\n    instance methods, mirroring their context in the codebase.\"\"\"\n\n    def __init__(self, verbose=None):\n        self.verbose = verbose\n\n        self.debug_message = \"A major clue to solving the evasive bug\"\n        self.detail_message = \"In practice this will be very detailed.\"\n        self.info_message = \"This should be short and sweet\"\n        self.warning_message = \"Best pay attention to this!\"\n\n    def func_2(self, good_kwarg=True, traceback=False, bad_kwarg=False):\n        \"\"\"Dummy function, otherwise trivial, where a True boolean\n        passed as a traceback keyword argument will ultimately raise an\n        error.\"\"\"\n        if traceback:\n            cf.functions._DEPRECATION_ERROR_KWARGS(\n                self,\n                \"another_func\",\n                traceback=True,\n                version=\"some version\",\n            )\n        return good_kwarg\n\n    @cf.decorators._deprecated_kwarg_check(\"traceback\", version=\"some version\")\n    def decorated_func_2(self, good_kwarg=True, traceback=False):\n        \"\"\"Dummy function equivalent to 'func_2', but a decorator\n        manages the logic to raise the error on use of a deprecated\n        keyword argument.\"\"\"\n        return good_kwarg\n\n    # Not testing 'bad_kwarg' here other than to the extent that it does not\n    # stop 'traceback from causing the expected deprecation-related error.\n    @cf.decorators._deprecated_kwarg_check(\n        \"traceback\", \"bad_kwarg\", version=\"some version\"\n    )\n    def multikwarg_decorated_func_2(\n        self, good_kwarg=True, traceback=False, bad_kwarg=False\n    ):\n        \"\"\"Dummy function equivalent to 'func_2', but a decorator\n        manages the logic to raise the error on use of a deprecated\n        keyword argument.\"\"\"\n        return good_kwarg\n\n    @cf.decorators._manage_log_level_via_verbose_attr\n    def decorated_logging_func(self):\n        \"\"\"Dummy method to test _manage_log_level_via_verbose_attr.\n\n        In particular, to test 
it interfaces with self.verbose\n correctly.\n\n \"\"\"\n logger.debug(self.debug_message)\n logger.detail(self.detail_message)\n logger.info(self.info_message)\n logger.warning(self.warning_message)\n\n\nclass DecoratorsTest(unittest.TestCase):\n \"\"\"Test decorators module.\n\n These are unit tests on the self-contained decorators applied to an\n artificial, trivial & not cf-python specific class, so for the cases\n where decorators are imported directly from cf, there is no need to\n duplicate such tests which are already in the cf test suite.\n\n \"\"\"\n\n def setUp(self):\n self.test_only = []\n\n def test_deprecated_kwarg_check(self):\n test_class = dummyClass()\n\n # Test without (or with default) deprecated keyword argument\n res_1 = test_class.func_2(good_kwarg=\"good\")\n res_2 = test_class.decorated_func_2(good_kwarg=\"good\")\n res_3 = test_class.func_2(good_kwarg=\"good\", traceback=False)\n res_4 = test_class.decorated_func_2(good_kwarg=\"good\", traceback=False)\n test_class.multikwarg_decorated_func_2(\n good_kwarg=\"good\", traceback=False\n )\n self.assertEqual(res_1, res_2)\n self.assertEqual(res_2, \"good\")\n self.assertEqual(res_3, res_4)\n self.assertEqual(res_4, \"good\")\n\n # Test with deprecated keyword argument\n with self.assertRaises(cf.functions.DeprecationError):\n test_class.func_2(good_kwarg=\"good\", traceback=True)\n with self.assertRaises(cf.functions.DeprecationError):\n test_class.decorated_func_2(good_kwarg=\"good\", traceback=True)\n with self.assertRaises(cf.functions.DeprecationError):\n test_class.func_2(traceback=True, bad_kwarg=\"bad\")\n with self.assertRaises(cf.functions.DeprecationError):\n test_class.multikwarg_decorated_func_2(\n traceback=True, bad_kwarg=\"bad\"\n )\n\n def test_manage_log_level_via_verbose_attr(self):\n # Order of decreasing severity/verbosity is crucial to one test below\n levels = [\"WARNING\", \"INFO\", \"DETAIL\", \"DEBUG\"]\n\n # Note we test assertions on the root logger object, which is the\n # one output overall at runtime, but the specific module logger name\n # should be registered within the log message:\n example_class = dummyClass()\n log_message = [\n f\"WARNING:{log_name}:{example_class.warning_message}\",\n f\"INFO:{log_name}:{example_class.info_message}\",\n f\"DETAIL:{log_name}:{example_class.detail_message}\",\n f\"DEBUG:{log_name}:{example_class.debug_message}\",\n ]\n\n for level in levels:\n # Important! 
Need to initialise class inside this loop not\n # outside it or, it retains the verbosity attribute value set\n # for the previous loop (0, i.e disable, so nothing emerges!)\n test_class = dummyClass()\n cf.log_level(level) # reset to level\n\n # Default verbose(=None) cases: log_level should determine output\n with self.assertLogs(level=cf.log_level().value) as catch:\n test_class.decorated_logging_func()\n\n for msg in log_message:\n # log_level should prevent messages less severe appearing:\n if levels.index(level) >= log_message.index(msg):\n self.assertIn(msg, catch.output)\n else: # less severe, should be effectively filtered out\n self.assertNotIn(msg, catch.output)\n\n # Cases where verbose is set; value should override log_level...\n\n # Highest verbosity case (note -1 == 'DEBUG', highest verbosity):\n # all messages should appear, regardless of global log_level:\n for attr in (-1, \"DEBUG\", \"debug\", \"Debug\", \"DeBuG\"):\n test_class.verbose = attr\n with self.assertLogs(level=-1) as catch:\n test_class.decorated_logging_func()\n for msg in log_message:\n self.assertIn(msg, catch.output)\n\n # Lowest verbosity case ('WARNING' / 1) excluding special case of\n # 'DISABLE' (see note above): only warning messages should appear,\n # regardless of global log_level value set:\n for attr in (1, \"WARNING\", \"warning\", \"Warning\", \"WaRning\"):\n test_class.verbose = attr\n with self.assertLogs(level=1) as catch:\n test_class.decorated_logging_func()\n for msg in log_message:\n if msg.split(\":\")[0] == \"WARNING\":\n self.assertIn(msg, catch.output)\n else:\n self.assertNotIn(msg, catch.output)\n\n # Boolean cases for testing backwards compatibility...\n\n # ... verbose=2 should be equivalent to verbose=3 now:\n test_class.verbose = True\n with self.assertLogs(level=3) as catch:\n test_class.decorated_logging_func()\n for msg in log_message:\n if msg.split(\":\")[0] == \"DEBUG\":\n self.assertNotIn(msg, catch.output)\n else:\n self.assertIn(msg, catch.output)\n\n # ... 
verbose=False should be equivalent to verbose=0 now, so\n        # test along with 'DISABLE' special case below...\n\n        # Special 'DISABLE' (0) case: note this needs to be last as we\n        # reset the log_level to it but need to use 'NOTSET' for the\n        # assertLogs level, which sends all log messages through:\n        for attr in (0, \"DISABLE\", \"disable\", \"Disable\", \"DisAblE\"):\n            test_class.verbose = attr\n            with self.assertLogs(level=\"NOTSET\") as catch:\n                # Note: get 'AssertionError' if don't log anything at all,\n                # so to avoid this and allow check for disabled logging,\n                # first log something then disable and check that no other\n                # messages emerge:\n                logger.info(\n                    \"Purely to keep 'assertLog' happy: see comment!\"\n                )\n                cf.log_level(\"DISABLE\")\n                test_class.decorated_logging_func()\n                for msg in log_message:  # nothing else should be logged\n                    self.assertNotIn(msg, catch.output)\n\n        # verbose=False should be equivalent in behaviour to verbose=0\n        test_class.verbose = False\n        with self.assertLogs(level=\"NOTSET\") as catch:\n            logger.info(\"Purely to keep 'assertLog' happy: see previous!\")\n            test_class.decorated_logging_func()\n            for msg in log_message:  # nothing else should be logged\n                self.assertNotIn(msg, catch.output)\n\n\nif __name__ == \"__main__\":\n    print(\"Run date:\", datetime.datetime.now())\n    cf.environment()\n    print()\n    unittest.main(verbosity=2)\n","repo_name":"NCAS-CMS/cf-python","sub_path":"cf/test/test_decorators.py","file_name":"test_decorators.py","file_ext":"py","file_size_in_byte":9540,"program_lang":"python","lang":"en","doc_type":"code","stars":80,"dataset":"github-code","pt":"9"}
{"seq_id":"35496021085","text":"from django.shortcuts import render\nfrom store.models import Collection, Product, Cart, CartItem\nimport time\n\n\ndef say_hello(request):\n    cart = Cart()\n    cart.save()\n    item1 = CartItem()\n    item1.cart = cart\n    item1.quantity = 1\n    item1.product = Product(pk=23)\n    item1.save()\n    return render(request, \"hello.html\", {\"name\": \"Mosh\"})\n","repo_name":"arashgl/first_django_project","sub_path":"playground/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"26974300150","text":"import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport terrain_dataset as td\nimport yaml\n# %matplotlib inline\nimport time\nimport os\n\ndata_loader = td.DataLoader()\n\nbatch_size = 32\nHEIGHT = 128\nWIDTH = 128\nEPOCHS = 1000\nBATCHES = int(data_loader.dataset_len_train/batch_size)\nTEST_BATCHES = int(data_loader.dataset_len_test/batch_size)\nSAVE_MODEL = True\nchannels = 1\nSCALING_FACTOR = 1\n# image_dimension = '{}*WIDTH*channels'.\nlr = 0.0005\n\nn_latent = 8\ntimestamp = time.strftime(\"%Y_%m_%d_%H_%M_%S\")\n\n\n#LOG FILE\nconfig_file = os.getcwd()+'/logfile/config/{}.yaml'.format(timestamp)\nusers = {'image_dimension':'{}*{}*{}'.format(HEIGHT,WIDTH,channels),'epochs':EPOCHS,'scaling_factor':SCALING_FACTOR,'batch_size':batch_size,'lr_starting': lr,'lr_constant':True,'weights_init':'xavier','latent_variable':n_latent}\nwith open(config_file, 'w') as f: \n\tdata = yaml.dump(users, f) \n\nX_in = tf.placeholder(dtype=tf.float32, shape=[None, HEIGHT, WIDTH,channels], name='X')\nY = tf.placeholder(dtype=tf.float32, shape=[None, HEIGHT, WIDTH,channels], name='Y')\n\nY_flat = tf.reshape(Y, shape=[-1, channels*HEIGHT * WIDTH])\nkeep_prob = tf.placeholder(dtype=tf.float32, shape=(), name='keep_prob')\n\ndec_in_channels =1\n\nreshaped_dim = 
[-1, 1, 1, 9216]\ndef lrelu(x, alpha=0.3):\n\treturn tf.maximum(x, tf.multiply(x, alpha))\n\ndef encoder(X_in, keep_prob):\n\tactivation = lrelu\n\twith tf.variable_scope(\"encoder\", reuse=None):\n\t\tX = tf.reshape(X_in, shape=[-1, HEIGHT, WIDTH, channels])\n\t\tx = tf.layers.conv2d(X, filters = 32, kernel_size=4, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\t\tx = tf.layers.conv2d(x, filters = 64, kernel_size=4, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\t\tx = tf.layers.conv2d(x, filters = 128, kernel_size=4, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\t\tx = tf.layers.conv2d(x, filters = 256, kernel_size=4, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\n\n\t\tx = tf.contrib.layers.flatten(x)\n\t\tprint(\"######shape of flattened x:\", x.shape)\n\n\t\tmn = tf.layers.dense(x, units=n_latent)\n\t\tsd = 0.5 * tf.layers.dense(x, units=n_latent) \n\t\tepsilon = tf.random_normal(tf.stack([tf.shape(x)[0], n_latent])) \n\t\tz = mn + tf.multiply(epsilon, tf.exp(sd))\n\t\t\n\t\treturn z, mn, sd\n\ndef decoder(sampled_z, keep_prob):\n\twith tf.variable_scope(\"decoder\", reuse=None):\n\t\tx = tf.layers.dense(sampled_z, units=9216, activation=tf.nn.relu)\n\t\tx = tf.reshape(x, reshaped_dim)\n\t\tx = tf.layers.conv2d_transpose(x, filters=128, kernel_size=5, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\t\tx = tf.layers.conv2d_transpose(x, filters=64, kernel_size=5, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\t\tx = tf.layers.conv2d_transpose(x, filters=32, kernel_size=6, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\t\tx = tf.layers.conv2d_transpose(x, filters=16, kernel_size=6, strides=2, activation=tf.nn.relu,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False)) ##for size 128*128\n\t\tx = tf.layers.conv2d_transpose(x, filters=dec_in_channels, kernel_size=2, strides=2, activation=tf.nn.sigmoid,kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=False))\n\n\t\timg = tf.reshape(x, shape=[-1, HEIGHT, WIDTH,1])\n\t\treturn img\n\n\nwith tf.device('/device:gpu:1'):\n\tsampled, mn, sd = encoder(X_in, keep_prob)\n\tdec = decoder(sampled, keep_prob)\nbeta = 10\nunreshaped = tf.reshape(dec, [-1,1*HEIGHT*WIDTH])\nimg_loss = tf.reduce_sum(tf.squared_difference(unreshaped, Y_flat), 1)\nlatent_loss = (-0.5 * tf.reduce_sum(1.0 + 2.0 * sd - tf.square(mn) - tf.exp(2.0 * sd), 1))\n# loss = tf.reduce_mean(img_loss + latent_loss)\nloss = tf.reduce_mean(img_loss) + beta*tf.reduce_mean(latent_loss)\n\n# global_step = tf.Variable(0, trainable=False)\nstarter_learning_rate = lr\n# learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,10000, 0.96, staircase=True)\n\noptimizer = tf.train.AdamOptimizer(starter_learning_rate).minimize(loss)\ntf.summary.scalar('loss',loss)\nmerged = tf.summary.merge_all()\n# print(\"######\",dir(optimizer))\nsaver = tf.train.Saver()\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\nsess = tf.Session(config=config)\nsess.run(tf.global_variables_initializer())\ntrain_writer = 
tf.summary.FileWriter(\"/home/cobra/abhimanyu_research/terrain_classifier/logfile/logdir_2_{}/summary_train\".format(timestamp), sess.graph)\ntest_writer = tf.summary.FileWriter(\"/home/cobra/abhimanyu_research/terrain_classifier/logfile/logdir_2_{}/summary_test\".format(timestamp), sess.graph)\n'''\nimport time\nprint(\"hello\")\ntime.sleep(10)\n# test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')\n'''\ncount = 0\n\nfor i in range(EPOCHS):\n\n\tsummary_test_total = 0\n\tsummary_train_total = 0\n\n\ttotal_batch = data_loader.train_data(batch_size = batch_size, scaling_factor = SCALING_FACTOR) #[batch x batch_size x 128 x 128 x 2]\n\t# print(\"Batch shape\",total_batch.shape)\n\ttest_batch = data_loader.test_data(batch_size = batch_size, scaling_factor = SCALING_FACTOR)\n\t# print(total_batch.shape)\n\t# loss_epoch = 0\n\tfor j in range(BATCHES):\n\t\tbatch = [np.reshape(b, [HEIGHT,WIDTH,1]) for b in total_batch[j,:,:,:,0]]\n\t\t# print(\"total_batch\",np.array(batch).shape)\n\t\t# plt.imshow(batch[0][:][:][0])\n\t\t# plt.show()\n\n\t# \t# print(\"######Shape of array, \",np.array(batch).shape)\n\t\t_,loss_train,summary_train = sess.run([optimizer, loss,merged] ,feed_dict = {X_in: batch, Y: batch})\n\t\t# print(\"Batches\", j)\n\n\t\t# count += 1\n\t\t# print(\"Type\",int(summary_train))\n\t\t# summary_train_total += summary_train\n\t\t# count+ = 1\n\ttrain_writer.add_summary(summary_train, i)\n\n\tfor k in range(TEST_BATCHES):\n\t\tbatch_test = [np.reshape(b, [HEIGHT,WIDTH,1]) for b in test_batch[k,:,:,:,0]]\n\t\t# print(\"Shape of array, \",np.array(batch_test).shape)\n\t\t_,loss_test,summary_test = sess.run([optimizer, loss,merged] ,feed_dict = {X_in: batch_test, Y: batch_test})\n\t\t# print(\"Batche/s\", k)\n\t\t# summary_test_total += summary_test\n\t\t# count+ = 1\n\n\ttest_writer.add_summary(summary_test,i)\n\tprint(\"After epoch {}, the train loss and test loss is {} , {}\".format(i,loss_train,loss_test))\n\tif not i % 10:\n\t\tif SAVE_MODEL:\n\t\t\tsave_model = saver.save(sess,\"/home/cobra/abhimanyu_research/terrain_classifier/logfile/temp_models_{}/{}_model.ckpt\".format(timestamp,i/10))\n\t\t\tprint(\"Model saved in path: %s\" % save_model)\n\t\tls, d, i_ls, d_ls, mu, sigm = sess.run([loss, dec, img_loss, latent_loss, mn, sd], feed_dict = {X_in: batch_test, Y: batch_test})\n\t\t# print(\"Decoded image is \",np.sum(d[0]))\n\t\t# plt.imshow(np.reshape(batch[0], [HEIGHT, WIDTH]))\n\t\t# plt.show()\n\t\t# plt.imshow(d[0,:,:,0])\n\t\t# plt.show()\n\t\tprint(i, ls, np.mean(i_ls), np.mean(d_ls))\n\n\n","repo_name":"Abhimanyu8713/beta-vae","sub_path":"terrain_vae_v1.py","file_name":"terrain_vae_v1.py","file_ext":"py","file_size_in_byte":6972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"1530919173","text":"from flask import Flask, request, jsonify\nimport model\n\napp = Flask(__name__)\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n cqpa = request.form.get('cqpa')\n iq = request.form.get('iq')\n profile_score = request.form.get('profile_score')\n\n result = {\n \"cqpa\" : cqpa,\n \"iq\" : iq,\n \"profile_score\" : profile_score\n }\n\n # return jsonify(result)\n return jsonify(model.predict(1,1,27))\n\n@app.route(\"/\")\ndef hello_world():\n return \"

    Hello, hahaah World!

    \"\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n \n ","repo_name":"Benngki/stuntmed.github.io","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"26830721398","text":"#coding: utf8\n\nimport keras\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.preprocessing.image import load_img, img_to_array, array_to_img\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\n\nimport numpy as np\n#from sklearn.model_selection import train_test_split\nfrom PIL import Image\nfrom tqdm import tqdm\n\nimport glob\nfrom datetime import datetime\n\nTARGET_SIZE = (32,32)\nbatch_size = 128\nepochs = 100\noutput_size = 2\nlast_activation = 'sigmoid'\n#last_activation = 'softmax'\n\ndef get_dataset(dirname):\n dog_files = glob.glob(dirname+'/dog/*.jpg')\n cat_files = glob.glob(dirname+'/cat/*.jpg')\n\n X = []\n Y = []\n\n for filename in tqdm(dog_files):\n img = load_img(filename, target_size=TARGET_SIZE)\n array = img_to_array(img) / 255\n X.append(array)\n Y.append([0,1])\n\n for filename in tqdm(cat_files):\n img = load_img(filename, target_size=TARGET_SIZE)\n array = img_to_array(img) / 255\n X.append(array)\n Y.append([1,0])\n\n return (np.array(X).astype('float32'), np.array(Y).astype('float32'))\n\ndef create_model():\n model = Sequential()\n model.add(Conv2D(32,(3,3), input_shape=(TARGET_SIZE[0],TARGET_SIZE[1],3)))\n model.add(Activation('relu'))\n #model.add(Conv2D(32, (3,3)))\n #model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(64,(3,3), padding='same'))\n model.add(Activation('relu'))\n #model.add(Conv2D(64,(3,3)))\n #model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n #model.add(Dense(64))\n #model.add(Activation('relu'))\n #model.add(Dropout(0.5))\n model.add(Dense(output_size))\n model.add(Activation(last_activation))\n\n model.summary()\n\n return model\n\ndef img_show(img):\n from PIL import Image\n pil_img = Image.fromarray(np.uint8(img))\n pil_img.show()\n\ndef main(traindir='./train', testdir='./test'):\n '''from keras.datasets import cifar10\n (x_train, y_train),(x_test,y_test) = cifar10.load_data()\n x_train=x_train.astype('float32')/255.0\n x_test=x_test.astype('float32')/255.0\n y_train=keras.utils.to_categorical(y_train,10)\n y_test=keras.utils.to_categorical(y_test,10)'''\n\n x_train, y_train = get_dataset(traindir)\n x_test, y_test = get_dataset(testdir)\n\n #x,y = get_dataset('../dataset')\n #x_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.50)\n model = create_model()\n model.compile(loss='mean_squared_error',\n optimizer=keras.optimizers.Adam(),\n metrics=['accuracy'])\n\n es_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')\n history = model.fit(x_train, y_train,\n shuffle = True,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test),\n callbacks=[es_cb])\n score = model.evaluate(x_test, y_test, verbose=0)\n print(\"Test loss: \", score[0])\n print(\"Test accuracy: \", score[1])\n\n name = datetime.now().strftime(\"%Y%m%d_%H%M%S.h5\")\n model.save('models/{0}'.format(name))\n print(\"Model is saved as 
{0}\".format(name))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Yuki-Mori/KerasHelloWorld","sub_path":"dog_and_cat.py","file_name":"dog_and_cat.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"}
{"seq_id":"6459577437","text":"from collections import deque\nimport sys\nread = sys.stdin.readline\ninf = sys.maxsize\n\nn, m = map(int, read().split())\nbridge = list([] for _ in range(n+1))\nfor _ in range(m):\n    a, b, c = map(int, read().split())\n    bridge[a].append([b, c])\n# store the shortest distance found so far from node 1\ndp = list(inf for _ in range(n+1))\ndp[1] = 0\n# track whether each node is currently in the deque and how many times it was relaxed\n# (a positive value means the node is in the deque)\ncheck = list(0 for _ in range(n+1))\n\npoint = deque([1])\ncheck[1] = 1\n\nwhile point:\n    a = point.popleft()\n    check[a] *= (-1)\n    for i, w in bridge[a]:\n        if dp[i] > dp[a]+w:\n            dp[i] = dp[a]+w\n            if check[i] <= 0:\n                check[i] = check[i]*(-1)+1\n                point.append(i)\n                if check[i] >= n:\n                    print(-1)\n                    sys.exit()\nfor d in dp[2:]:\n    print(d if d != inf else -1)\n","repo_name":"youseop/Problem_solutions","sub_path":"BAEKJOON/11657_타임머신.py","file_name":"11657_타임머신.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"}
{"seq_id":"10448104915","text":"from enum import Enum\r\n\r\nclass TokenType(Enum):\r\n    Number = 1\r\n    Identifier = 2\r\n    Equals = 3\r\n    OpenParen = 4\r\n    CloseParen = 5\r\n    BinaryOperator = 6\r\n    Let = 7\r\n    NIL = 8 # Not In Language\r\n\r\nKEYWORDS = {\r\n    \"let\": TokenType.Let,\r\n}\r\n\r\nSPECIALCHARS = {\r\n    \"(\" : TokenType.OpenParen,\r\n    \")\" : TokenType.CloseParen,\r\n    \"+\" : TokenType.BinaryOperator,\r\n    \"-\" : TokenType.BinaryOperator,\r\n    \"*\" : TokenType.BinaryOperator,\r\n    \"/\" : TokenType.BinaryOperator,\r\n    \"=\" : TokenType.Equals,\r\n}\r\n\r\nclass Token:\r\n    def __init__(self, value, type):\r\n        self.value = value\r\n        self.type = type\r\n\r\ndef token(value, type):\r\n    return Token(value, type)\r\n\r\n# Use regular expressions in Python\r\nimport re\r\n\r\ndef isskippable(str):\r\n    return str == ' ' or str == '\\n' or str == '\\t'\r\n\r\ndef identifyComplexToken(str):\r\n    reserved = KEYWORDS.get(str)\r\n    if reserved is not None:\r\n        return token(str,reserved)\r\n    elif bool(re.search(r'^[0-9]+$', str)):\r\n        return token(str, TokenType.Number)\r\n    elif bool(re.search(r\"^[a-zA-Z]+$\", str)):\r\n        return token(str, TokenType.Identifier)\r\n    else:\r\n        return token(str, TokenType.NIL)\r\n\r\n\r\n\r\ndef tokenize(sourceCode):\r\n    tokens = []\r\n    src = list(sourceCode)\r\n    currentString = \"\"\r\n\r\n    while src:\r\n        special = SPECIALCHARS.get(src[0])\r\n        if(special is not None):\r\n            if(currentString != \"\"):\r\n                tokens.append(identifyComplexToken(currentString))\r\n                currentString = \"\"\r\n            tokens.append(token(src.pop(0),special))\r\n        elif(not isskippable(src[0])):\r\n            currentString = currentString + src.pop(0)\r\n        else:\r\n            if(currentString == \"\"):\r\n                src.pop(0)\r\n            else:\r\n                tokens.append(identifyComplexToken(currentString))\r\n                currentString = \"\"\r\n                src.pop(0)\r\n\r\n    if(currentString != \"\"):\r\n        tokens.append(identifyComplexToken(currentString))\r\n\r\n\r\n    return tokens\r\n\r\nwith open(\"./test.txt\", \"r\") as file:\r\n    source = file.read()\r\n    for token in tokenize(source):\r\n        print(token.type, \" \", 
token.value)\r\n\r\n","repo_name":"Guzmance/Software-Tokenizador-TEO","sub_path":"lexer_prueba.py","file_name":"lexer_prueba.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"14275891607","text":"import random\n\nimport requests\n\n\nclass HtmlDownloader(object):\n def __init__(self):\n self.UA_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/7.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; GWX:MANAGED)',\n 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; GWX:MANAGED)'\n ]\n self.proxies_list = [{'proxy': 'http:\\\\10.220.70.254:808'}, {'proxy': 'http:\\\\10.221.70.254:808'},\n {'proxy': 'http:\\\\10.222.70.254:808'}, {'proxy': 'http:\\\\10.223.70.254:808'}]\n self.headers = {'User-Agent': random.choice(self.UA_list), 'Referer': 'https://www.hc360.com/'}\n\n def download(self, url):\n if url is None:\n return None\n response = requests.get(url, headers=self.headers, proxies=random.choice(self.proxies_list), timeout=60)\n if response.status_code != 200:\n return None\n return response.text\n\n def download_content(self, url):\n if url is None:\n return None\n response = requests.get(url, headers=self.headers, proxies=random.choice(self.proxies_list), timeout=60)\n if response.status_code != 200:\n return None\n return response.content","repo_name":"GodKerwin/crawlhc","sub_path":"crawl/html_downloader.py","file_name":"html_downloader.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"42619004062","text":"def tobin(x):\n return tobin(x // 2) + [x % 2] if x > 1 else [x]\n\n\ndef todec(l):\n if len(l) == 0:\n return 0\n a = l.pop()\n return 2 * todec(l) + a\n\n\ndef code(n):\n step = 0\n li = []\n while n > 0:\n l = tobin(n)[1:]\n li = l + li\n n = len(l)\n step += 1\n return step * [1] + [0] + li\n\n\ndef decode(l):\n step = 0\n while l.pop(0) == 1:\n step += 1\n if step == 0:\n return 0\n N = 1\n for _ in range(step - 1):\n li = [1]\n for _ in range(N):\n li += [l.pop(0)]\n N = todec(li)\n return N\n\n\nprint([decode(code(x)) for x in range(20)])\n","repo_name":"lgarcin/TIPE","sub_path":"2016/levenshtein.py","file_name":"levenshtein.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"38585122934","text":"\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Layer, Dense, Conv2D, AveragePooling2D, Dropout\nfrom tensorflow_addons.layers import GroupNormalization\nimport numpy as np\n\n\ndef get_timestep_embedding(timesteps, embedding_dim: int):\n #timestep embedding for self attentional layers.\n\n assert len(timesteps.shape) == 1 \n\n half_dim = embedding_dim // 2\n emb = np.log(10000) / (half_dim - 1)\n emb = tf.exp(tf.range(half_dim, dtype=tf.float32) * -emb)\n \n emb = tf.cast(timesteps, dtype=tf.float32)[:, None] * emb[None, :]\n emb = tf.concat([tf.sin(emb), tf.cos(emb)], axis=1)\n if 
embedding_dim % 2 == 1: # zero pad\n emb = tf.pad(emb, [[0, 0], [0, 1]])\n assert emb.shape == [timesteps.shape[0], embedding_dim]\n return emb\n\nclass downsample(Layer):\n #reduces the spatial dimension by 2.\n def __init__(self, c, with_conv, name=None):\n super().__init__(name=name)\n if with_conv:\n self.down = Conv2D(c, 3, padding='same', strides=2)\n else:\n self.down = AveragePooling2D()\n \n def call(self, x, index):\n return self.down(x)\n \nclass upsample(Layer):\n #increases the spatial dimension by 2\n def __init__(self, c, with_conv, name=None):\n super().__init__(name=name)\n self.with_conv = with_conv\n if self.with_conv:\n self.up = Conv2D(c, 3, padding='same')\n\n def call(self, x, index):\n B, H, W, C = x.shape\n x = tf.image.resize(x, size=[H * 2, W * 2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n if self.with_conv:\n x = self.up(x)\n return x\n\nclass resnet_block(Layer):\n #residual convolutional block\n def __init__(self, c, name=None, drop_rate=0.0, groups=32):\n super().__init__(name=name) \n self.c = c\n self.drop_rate = drop_rate\n \n self.conv1 = Conv2D(c, 3, padding='same')\n self.conv2 = Conv2D(c, 3, padding='same')\n \n self.norm1 = GroupNormalization(groups=groups) \n self.norm2 = GroupNormalization(groups=groups)\n self.temb_proj = Dense(c) \n\n if drop_rate > 0.01:\n self.drop_fn = Dropout(drop_rate)\n else:\n self.drop_fn = tf.identity\n\n def build(self, input_shape):\n if input_shape[-1] != self.c:\n self.skip_conv = Dense(self.c)\n else:\n self.skip_conv = None\n\n def call(self, x, index):\n residual = tf.identity(x)\n x = tf.nn.swish(self.norm1(x))\n x = self.conv1(x)\n x = self.drop_fn(x)\n \n x += self.temb_proj(tf.nn.swish(index))[:, None, None, :]\n x = tf.nn.swish(self.norm2(x))\n x = self.conv2(x)\n x = self.drop_fn(x)\n\n if self.skip_conv is not None:\n residual = self.skip_conv(residual)\n \n return x + residual \n\nclass attn_block(Layer):\n #self attentional block\n def __init__(self, c, name=None, drop_rate=0.0, groups=32):\n super().__init__(name=name) \n self.c = c\n self.k = Dense(c)\n self.norm = GroupNormalization(groups=groups)\n self.proj_out = Dense(c)\n self.q = Dense(c)\n self.v = Dense(c)\n \n if drop_rate > 0.01:\n self.drop_fn = Dropout(drop_rate)\n else:\n self.drop_fn = tf.identity\n \n def build(self, input_shape):\n if input_shape[-1] != self.c:\n self.skip = Dense(self.c)\n else:\n self.skip = None\n\n def call(self, x, index):\n B, H, W, C = x.shape\n residual = tf.identity(x)\n x = self.norm(x)\n q, k, v = self.q(x), self.k(x), self.v(x)\n\n w = tf.einsum('bhwc,bHWc->bhwHW', q, k) * (int(C) ** (-0.5))\n w = tf.reshape(w, [B, H, W, H * W])\n w = tf.nn.softmax(w, -1)\n w = tf.reshape(w, [B, H, W, H, W])\n x = tf.einsum('bhwHW,bHWc->bhwc', w, v)\n\n x = self.drop_fn(x)\n\n if self.skip is not None:\n residual = self.skip(residual)\n\n x = self.proj_out(x)\n return x + residual\n","repo_name":"tcl9876/denoising_synthesis","sub_path":"code/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"29185628258","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\n# Setting up the webdriver\ndriver = webdriver.Chrome()\n\ndriver.get(\"https://www.asos.com/\")\ntime.sleep(3)\nsearch_bar = driver.find_element(\"id\",\"chrome-search\")\nsearch_bar.send_keys(\"shirt\")\n\nsearch_bar.send_keys(Keys.RETURN)\n\n# Waiting for the search results page to 
load\ntime.sleep(5)\n\nassert \"shirt\" in driver.title\n\nselect_radio = driver.find_element(\"xpath\", \"/html/body/div[1]/div/div[2]/header/section/div/div/div/a[1]\");\nselect_radio.click()\n\ntime.sleep(5)\n\nlaptop_link = driver.find_element(\"xpath\",\"/html/body/div[1]/div/main/div/div/div[1]/div[2]/div/div[1]/section/article[1]/a/div[1]/img\");\n\nlaptop_link.click()\n\ntime.sleep(5)\n\n\n# Closing the webdriver\ndriver.close()\n","repo_name":"niteshRawat070/Assignment3STM","sub_path":"Assignment3/Assignment3_8876091.py","file_name":"Assignment3_8876091.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"19507696820","text":"# Write a program that removes from the text all words containing \"\"абв\"\".\n\nwith open(\"text.txt\", \"w\", encoding = 'utf-8') as f:\n    f.write(\"Умениеабв создавать файлыабв в Python открывает массу новых возможностейабв — например,\\nпозволяет хранить данные, сохраняя их согласованность для разных пользователей.\\n\")\n\nwith open('text.txt', encoding = 'utf-8') as f:\n    s = f.read()\n    # print(s)\n\n# find = 'абв' \n# if find in s:\n#     print('true')\n\nns = s.replace('абв', '')\n# print(ns)\n\nwith open('new_text.txt','w', encoding = 'utf-8') as f:\n    f.write(ns)","repo_name":"YuliiaMiheikina/Python","sub_path":"Homework/hw_5/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"29396473148","text":"import discord\nfrom discord.ext import commands\nimport random\nimport asyncio\nimport os\nimport logging\nimport json\nimport datetime\nimport sys\nimport cogs.activeguard\n\nnow = datetime.datetime.now()\nif not os.path.exists('logs'):\n    os.makedirs('logs')\n\"\"\"logging.basicConfig(filename=f'logs/{now.year}_{now.month}_{now.day}_{now.hour}-{now.minute}-{now.second}.txt',\n                    filemode='a',\n                    format=\"[%(asctime)s] [%(levelname)8s] --- %(message)s (%(name)s - %(filename)s:%(lineno)s)\",\n                    datefmt='%H:%M:%S',\n                    level=logging.INFO)\"\"\"\n\nclass CustomFormatter(logging.Formatter):\n\n    grey = \"\\x1b[38;20m\"\n    yellow = \"\\x1b[33;20m\"\n    red = \"\\x1b[31;20m\"\n    bold_red = \"\\x1b[31;1m\"\n    reset = \"\\x1b[0m\"\n    format = \"[%(asctime)s] [%(levelname)8s] --- %(message)s (%(name)s - %(filename)s:%(lineno)s)\"\n\n    FORMATS = {\n        logging.DEBUG: grey + format + reset,\n        logging.INFO: grey + format + reset,\n        logging.WARNING: yellow + format + reset,\n        logging.ERROR: red + format + reset,\n        logging.CRITICAL: bold_red + format + reset\n    }\n\n    def format(self, record):\n        log_fmt = self.FORMATS.get(record.levelno)\n        formatter = logging.Formatter(log_fmt, '%H:%M:%S')\n        return formatter.format(record)\n\nlogFormatter = CustomFormatter()\nrootLogger = logging.getLogger()\nrootLogger.setLevel(logging.INFO)\n\nfileHandler = logging.FileHandler(f'logs/{now.year}_{now.month}_{now.day}_{now.hour}-{now.minute}-{now.second}.txt')\nfileHandler.setFormatter(logFormatter)\nrootLogger.addHandler(fileHandler)\n\nconsoleHandler = logging.StreamHandler()\nconsoleHandler.setFormatter(logFormatter)\nrootLogger.addHandler(consoleHandler)\n\n\nclass KidneyBotConfig:\n    def __init__(self, conf):\n        self.token = conf['token']\n        self.dbstring = conf['dbstring']\n        self.owner_id = int(conf['ownerid'])\n        self.report_channel = int(conf['report_channel'])\n        self.perspective_api_key = conf.get('perspective_api_key')\n\n\nwith open('config.json', 'r') as f:\n    config = 
KidneyBotConfig(json.load(f))\n\n\nclass Bot(commands.Bot):\n\n    def __init__(self, command_prefix, owner_id, intents):\n        super().__init__(\n            command_prefix=command_prefix,\n            owner_id=owner_id,\n            intents=intents\n        )\n        import motor.motor_asyncio\n        client = motor.motor_asyncio.AsyncIOMotorClient(config.dbstring)\n        self.database = client.data\n        self.config = config\n\n    async def setup_hook(self):\n        await self.tree.sync()\n        self.add_view(cogs.activeguard.ReportView())\n\n    async def addcurrency(self, user: discord.User, value: int, location: str):\n        n = await self.database.currency.count_documents({\"userID\": str(user.id)})\n        if n == 1:\n            doc = await self.database.currency.find_one({\"userID\": str(user.id)})\n            if location == 'wallet':\n                await self.database.currency.update_one({'userID': str(user.id)},\n                                                        {'$set': {'wallet': str(int(doc['wallet']) + value)}})\n            elif location == 'bank':\n                await self.database.currency.update_one({'userID': str(user.id)},\n                                                        {'$set': {'bank': str(int(doc['bank']) + value)}})\n        else:\n            wallet, bank = (0, 0)\n            if location == 'wallet':\n                wallet = value\n            elif location == 'bank':\n                bank = value\n            await self.database.currency.insert_one({\n                \"userID\": str(user.id),\n                \"wallet\": str(wallet),\n                \"bank\": str(bank),\n                \"inventory\": []\n            })\n    \n    async def log(self, guild: discord.Guild, actiontype: str, action: str, reason: str = None, user: discord.User = None, target: discord.User = None, message: discord.Message = None, color: discord.Color = None):\n        doc = await self.database.automodsettings.find_one({'guild': guild.id})\n        if doc is None:\n            return\n        if doc.get('log_channel') is None:\n            return\n\n        color = discord.Color.red() if color is None else color\n        \n        embed = discord.Embed(title=f'{actiontype}',\n                              description=f'{action}\\n**User:** {user.mention} ({user.id})' + \n                              (f\"**Target:** {target.mention} ({target.id})\" if target is not None else \"\") +\n                              (f\"\\n**Reason:** {reason}\\n\" if reason is not None else \"\") +\n                              (f'**Message:** ```{message.content}```' if message is not None else ''),\n                              color=color)\n        embed.set_footer(text=f'Automated logging by kidney bot')\n        await self.get_channel(doc['log_channel']).send(embed=embed)\n\n\nbot = Bot(command_prefix=commands.when_mentioned_or('kb.'),\n          owner_id=config.owner_id,\n          intents=discord.Intents.all()\n          )\n\nstatuses = [\"with the fate of the world\", \"minecraft\"]\n\n\nasync def status():\n    await bot.wait_until_ready()\n    while not bot.is_closed():\n        currentstatus = random.choice(statuses)\n        await bot.change_presence(activity=discord.Game(name=currentstatus))\n        await asyncio.sleep(10)\n\n\n@bot.listen('on_ready')\nasync def on_ready():\n    logging.info(f'We have logged in as {bot.user}')\n\n\n@bot.listen('on_guild_join')\nasync def on_guild_join(guild):\n    n = await bot.database.serverbans.count_documents({\"id\": str(guild.id)})\n    if n > 0:\n        doc = await bot.database.serverbans.find_one({\"id\": str(guild.id)})\n        embed = discord.Embed(title=f\"{guild} is banned.\",\n                              description=f\"Your server *{guild}* is banned from using **{bot.user.name}**.\",\n                              color=discord.Color.red())\n        embed.add_field(name=f\"You can appeal by contacting __**{bot.get_user(766373301169160242)}**__.\",\n                        value=\"\\u2800\")\n        embed.add_field(name=\"Reason\", value=f\"```{doc['reason']}```\")\n        embed.set_footer(text=bot.user, icon_url=bot.user.avatar)\n        await guild.owner.send(embed=embed)\n        await guild.leave()\n\n\n@bot.listen('on_guild_remove')\nasync def on_guild_remove(guild):\n    await bot.database.bans.delete_many({\"serverID\": 
str(guild.id)})\n    await bot.database.prefixes.delete_many({\"id\": str(guild.id)})\n\n\n@bot.command()\n@commands.is_owner()\nasync def testLog(ctx, actiontype, action, reason, user: discord.User):\n    await bot.log(ctx.guild, actiontype, action, reason, user)\n\n\n@bot.command()\n@commands.is_owner()\nasync def load(ctx, extension: str):\n    try:\n        os.rename(f'cogs/-{extension}.py', f'cogs/{extension}.py')\n        await bot.load_extension(f'cogs.{extension}')\n        await ctx.reply(f'Loaded cog {extension}')\n        logging.info(f'{extension.capitalize()} cog loaded.')\n    except Exception as e:\n        await ctx.reply(f'Could not load cog {extension}\\n`{e}`')\n\n\n@bot.command()\n@commands.is_owner()\nasync def unload(ctx, extension: str):\n    try:\n        await bot.unload_extension(f'cogs.{extension}')\n        os.rename(f'cogs/{extension}.py', f'cogs/-{extension}.py')\n        await ctx.reply(f'Unloaded cog {extension}')\n        logging.info(f'{extension.capitalize()} cog unloaded.')\n    except Exception as e:\n        await ctx.reply(f'Could not unload cog {extension}\\n`{e}`')\n\n\n@bot.command()\n@commands.is_owner()\nasync def reload(ctx, extension: str):\n    try:\n        await bot.unload_extension(f'cogs.{extension}')\n    except Exception as e:\n        await ctx.reply(f'Could not unload cog {extension}\\n`{e}`')\n    try:\n        await bot.load_extension(f'cogs.{extension}')\n        await ctx.reply(f'Reloaded cog {extension}')\n        logging.info(f'Reloaded cog {extension}')\n    except Exception as e:\n        await ctx.reply(f'Could not load cog {extension}\\n`{e}`')\n\n\n@bot.command()\n@commands.is_owner()\nasync def say(ctx, *, text: str):\n    try:\n        await ctx.message.delete()\n    except:\n        pass\n    await ctx.channel.send(text)\n\n\n@bot.command()\n@commands.is_owner()\nasync def reply(ctx, message: str, *, text: str):\n    try:\n        await ctx.message.delete()\n    except:\n        pass\n    channel = ctx.channel\n    message = await channel.fetch_message(int(message))\n    await message.reply(text)\n\n\n@bot.command()\n@commands.is_owner()\nasync def react(ctx, message: str, reaction: str):\n    try:\n        await ctx.message.delete()\n    except:\n        pass\n    channel = ctx.channel\n    message = await channel.fetch_message(int(message))\n    await message.add_reaction(reaction)\n\n\n@bot.command()\n@commands.is_owner()\nasync def announce(ctx, *, message: str):\n    await ctx.reply(f'Sent global message\\n```{message}```')\n    ids = []\n    for guild in bot.guilds:\n        if int(guild.owner_id) not in ids:\n            await guild.owner.send(\n                f'Message from the dev!\\n```{message}```(you are receiving this, because you own a server with this bot)')\n            ids.append(int(guild.owner_id))\n\n\n@bot.command()\n@commands.is_owner()\nasync def raiseexception(ctx):\n    raise Exception('artificial exception raised')\n\n\n@bot.command()\n@commands.is_owner()\nasync def serverban(ctx, guild: discord.Guild, *, text: str):\n    n = await bot.database.serverbans.count_documents({\"id\": str(guild.id)})\n    if n > 0:\n        await ctx.reply(\"Server already banned!\")\n        return\n    doc = {\n        \"id\": str(guild.id),\n        \"name\": str(guild),\n        \"owner\": str(guild.owner),\n        \"reason\": str(text)\n    }\n    embed = discord.Embed(title=f\"{guild} has been banned.\",\n                          description=f\"Your server *{guild}* has been banned from using **{bot.user.name}**.\",\n                          color=discord.Color.red())\n    embed.add_field(name=f\"You can appeal by contacting __**{ctx.message.author}**__.\", value=\"\\u2800\")\n    embed.add_field(name=\"Reason\", value=f\"```{text}```\")\n    embed.set_footer(text=bot.user, icon_url=bot.user.avatar)\n    await guild.owner.send(embed=embed)\n    
\"\"\"serverbandic[guild.id] = {\n \"name\": str(guild),\n \"owner\": str(guild.owner),\n \"reason\": text\n }\n with open(\"serverbans.json\", \"w\") as file:\n json.dump(serverbandic, file)\"\"\"\n await ctx.reply(\n f\"Server *{guild}* has been permanently blacklisted from using **{bot.user.name}**\")\n bot.database.serverbans.insert_one(doc)\n await guild.leave()\n\n\n@bot.command()\n@commands.is_owner()\nasync def serverunban(ctx, guild: str):\n n = await bot.database.serverbans.count_documents({\"id\": str(guild)})\n if n == 0:\n await ctx.reply(\"Server not banned!\")\n return\n await bot.database.serverbans.delete_one({\"id\": str(guild)})\n await ctx.reply(f\"Server *{guild}* has been unbanned from using **{bot.user.name}**\")\n\n\n@bot.command()\n@commands.is_owner()\nasync def createinvite(ctx, guild: discord.Guild):\n inv = 'error'\n for i in guild.text_channels:\n try:\n inv = await i.create_invite(max_uses=1, reason='bot developer requested server invite.')\n break\n except:\n pass\n await ctx.reply(inv)\n\n\nasync def main():\n async with bot:\n for filename in os.listdir('./cogs'):\n if filename.endswith('.py'):\n if not filename.startswith('-'):\n await bot.load_extension(f'cogs.{filename[:-3]}')\n\n await bot.load_extension('jishaku')\n\n asyncio.create_task(status())\n\n await bot.start(config.token)\n\n\nasyncio.run(main())\n","repo_name":"ProSureString/kidney-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"9"} +{"seq_id":"10135354837","text":"import sys\n\nfrom flask import Flask, render_template, request\nimport requests\n\napp = Flask(__name__)\n\n\nclass Game:\n def __init__(self,\n appid: str,\n title: str,\n wishlist_priority: int=sys.maxsize,\n demo_appid: str = None):\n self.appid = appid\n self.title = title\n self.url = f\"https://store.steampowered.com/app/{appid}/\"\n self.wishlist_priority = wishlist_priority\n self.demo_appid = demo_appid\n\n def get_demo_str(self):\n return \"HAS DEMO\" if self.demo_appid else \"no demo\"\n\n def __str__(self):\n return f\"{self.title} ({self.wishlist_priority}, {self.appid}, {self.get_demo_str()}, {self.url})\"\n\n\n@app.route('/', methods = ['POST', 'GET'])\ndef hello_world():\n if request.method == 'POST':\n\n data = request.form\n steam_id = data.get('steamid')\n\n if not steam_id:\n return render_template(\n \"steamid-form.html\",\n error_message=\"Insert a valid Steam ID\")\n\n try:\n wishlist = steam_request_wishlist(steam_id)\n wishlist_games = build_game_data(wishlist)\n update_demo_info(wishlist_games)\n\n sorted_games = list(wishlist_games.values())\n sorted_games.sort(key=lambda game: game.wishlist_priority)\n\n total_demos = sum([1 for game in sorted_games if game.demo_appid])\n return render_template(\"wishlist.html\",\n game_list=sorted_games,\n total_demos=total_demos,\n steam_id=steam_id)\n except Exception as e:\n print(f\"Something went wrong for steam ID {steam_id}: {e}\")\n return render_template(\n \"steamid-form.html\",\n error_message=f\"Something went wrong: {e}\")\n else:\n return render_template(\"steamid-form.html\")\n\n\ndef build_game_data(wishlist_json: dict):\n\n games = {}\n\n for game_id, game_data in wishlist_json.items():\n title = _get_key(game_data, 'name')\n wishlist_priority = _get_key(game_data, 'priority')\n if game_id:\n games[game_id] = Game(appid=game_id, title=title, wishlist_priority=wishlist_priority)\n\n return games\n\n\ndef update_demo_info(games: 
dict[str, Game]):\n gameids = list(games.keys())\n\n # Send requests for game demo data in batches of 50\n for ids in range(0, len(gameids), 50):\n current_batch = gameids[ids:ids + 50]\n demo_info_data = steam_request_demo_info(current_batch)\n if demo_info_data:\n _fill_in_demo_appid(games, demo_info_data)\n\n\ndef steam_request_wishlist(steamid):\n\n if str(steamid).isnumeric():\n wishlist_endpoint = f\"https://store.steampowered.com/wishlist/profiles/{steamid}/wishlistdata\"\n else:\n raise Exception(\"Need to provide the steamID, not your steam profile name\")\n\n def is_private_profile(_json):\n return len(_json) == 1 and 'success' in _json.keys()\n\n def is_valid_wishlist_json(_json):\n return len(_json) > 0\n\n wishlist_pages = {}\n page = 0\n error_message = None\n\n # By default, the endpoint returns a limited number of wishlist results,\n # so we need to explicitly request each page until there are no more\n # but set a hard limit at 50 (approx. 4,500 games) so as not to make too many requests\n while page < 50:\n try:\n page_response = requests.get(f\"{wishlist_endpoint}?p={page}\")\n current_page_json = page_response.json()\n\n if is_private_profile(current_page_json):\n error_message = \"Profile is private\"\n break\n\n if is_valid_wishlist_json(current_page_json):\n page += 1\n wishlist_pages.update(current_page_json)\n\n else:\n break\n\n except Exception as e:\n error_message = f\"Error reading wishlist: {e}\"\n break\n\n if error_message:\n raise Exception(error_message)\n else:\n return wishlist_pages\n\n\ndef steam_request_demo_info(app_id_list):\n \"\"\"\n Endpoint from https://github.com/IsThereAnyDeal/AugmentedSteam/issues/447#issuecomment-1280992376\n\n Sample request-response:\n curl -v https://store.steampowered.com/saleaction/ajaxgetdemoevents?appids[]=1284190\n {\"success\":1,\"info\":[{\"appid\":1284190,\"demo_appid\":1754850,\"demo_package_id\":0}]}\n\n curl -v \"https://store.steampowered.com/saleaction/ajaxgetdemoevents?appids[]=1284190&appids[]=867210&appids[]=846030\"\n {\"success\":1,\"info\":[{\"appid\":846030,\"demo_appid\":949730,\"demo_package_id\":0},{\"appid\":1284190,\"demo_appid\":1754850,\"demo_package_id\":0},{\"appid\":867210,\"demo_appid\":0,\"demo_package_id\":0}]}\n \"\"\"\n demo_endpoint = \"https://store.steampowered.com/saleaction/ajaxgetdemoevents\"\n\n appids = [f\"appids[]={appid}\" for appid in app_id_list]\n app_id_params = \"&\".join(appids)\n\n response = requests.get(url=demo_endpoint, params=app_id_params)\n\n # TODO error handling\n try:\n data = response.json()\n return data['info']\n except Exception as e:\n return None\n\n\ndef _get_key(_game_data, key):\n try:\n return _game_data[key]\n except KeyError as e:\n return None\n\n\ndef _fill_in_demo_appid(games_dict, demo_info_data):\n for app_id_obj in demo_info_data:\n appid = _get_key(app_id_obj, 'appid')\n demo_appid = _get_key(app_id_obj, 'demo_appid')\n\n if demo_appid:\n game = games_dict[str(appid)]\n game.demo_appid = demo_appid\n\n\nif __name__ == '__main__':\n # this port must correlate with `internal_port` from fly.toml\n app.run(port=8080, host='0.0.0.0')","repo_name":"cristan2/steam-wishlist-demos","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"5682447281","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\ndeneme1 = np.zeros((10))\r\n\r\nprint(deneme1)\r\n\r\ndeneme2 = np.zeros((10))\r\n\r\ndeneme3 
= np.zeros((10))\r\n\r\nepochs = 10\r\n\r\nfor i in range(epochs):\r\n deneme1[i] = (i + 2) * 2\r\n deneme2[i] = (i + 3) * 2\r\n deneme3[i] = i","repo_name":"YusufSonmezz/FordStaj1","sub_path":"src/model_load.py","file_name":"model_load.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"9"} +{"seq_id":"38254863730","text":"import contextlib\nimport logging\nimport os\nfrom typing import Dict, Optional\nfrom unittest import mock\n\nimport torch_xla\nfrom absl.testing import absltest, parameterized\nimport torch_xla.core.xla_env_vars as xenv\nimport torch_xla.core.xla_model as xm\nfrom torch_xla.experimental import pjrt\n\n\nclass TestExperimentalPjrt(parameterized.TestCase):\n\n def setUp(self):\n pjrt.set_device_type('CPU')\n\n @parameterized.parameters(('CPU', 'CPU'), ('GPU', 'GPU'), ('TPU', 'TPU'),\n ('TPU_C_API', 'TPU'), ('TPU_LEGACY', 'TPU'))\n def test_device_type(self, pjrt_device, expected):\n with mock.patch.dict(os.environ, {'PJRT_DEVICE': pjrt_device}, clear=True):\n self.assertEqual(pjrt.device_type(), expected)\n\n def test_requires_pjrt(self):\n with mock.patch.dict(\n os.environ, {'PJRT_SELECT_DEFAULT_DEVICE': '0'}, clear=True):\n with self.assertRaises(NotImplementedError):\n pjrt.xla_device()\n\n def test_default_ordinals(self):\n global_ordinal = xm.get_ordinal()\n self.assertEqual(global_ordinal, 0)\n\n local_ordinal = xm.get_local_ordinal()\n self.assertEqual(local_ordinal, 0)\n\n def test_num_local_devices(self):\n self.assertLen(xm.get_xla_supported_devices(),\n pjrt.addressable_device_count())\n\n def test_num_global_devices(self):\n self.assertLen(torch_xla._XLAC._xla_get_all_devices(),\n pjrt.global_device_count())\n\n def test_world_size(self):\n self.assertEqual(xm.xrt_world_size(), pjrt.world_size())\n\n def test_xla_device_error(self):\n with self.assertRaises(IndexError):\n xm.xla_device(10)\n\n @parameterized.named_parameters(('default', {}, True), ('no_default', {\n 'PJRT_SELECT_DEFAULT_DEVICE': '0'\n }, False), ('pjrt_cpu', {\n 'PJRT_DEVICE': 'CPU',\n 'PJRT_SELECT_DEFAULT_DEVICE': '0'\n }, True), ('xrt_tpu', {\n 'XRT_TPU_CONFIG': 'localservice;0;localhost:51011'\n }, False), ('pjrt_tpu_precedence', {\n 'PJRT_DEVICE': 'TPU',\n 'XRT_TPU_CONFIG': 'localservice;0;localhost:51011',\n }, True), ('xrt_gpu', {\n 'GPU_NUM_DEVICES': '4'\n }, False), ('pjrt_gpu', {\n 'PJRT_DEVICE': 'GPU',\n 'GPU_NUM_DEVICES': '4'\n }, True), ('xla_dist_worker', {\n 'XRT_LOCAL_WORKER': 'c_localservice:2'\n }, False))\n def test_pjrt_default_device(self, env_vars, expect_using_pjrt):\n with mock.patch.dict(os.environ, env_vars, clear=True):\n # Print a warningif we had to select a default runtime\n if 'PJRT_DEVICE' not in os.environ and expect_using_pjrt:\n logs_context = self.assertLogs(level=logging.WARNING)\n else:\n logs_context = contextlib.nullcontext()\n\n with logs_context:\n # Configure default device\n pjrt.using_pjrt()\n\n if expect_using_pjrt:\n self.assertIn(pjrt.device_type(), ['CPU', 'GPU', 'TPU'])\n else:\n self.assertIsNone(pjrt.device_type())\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"ddunl/pytorch-xla","sub_path":"test/pjrt/test_experimental_pjrt.py","file_name":"test_experimental_pjrt.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"9"} +{"seq_id":"15151782426","text":"# Python\nimport json\n\n# FastApi\n\nfrom fastapi import HTTPException\nfrom fastapi import status\nfrom fastapi 
import Query\nfrom fastapi import APIRouter\n\n# Modules\n\nfrom queries.data_inflacion import get_inflation\n\nrouter = APIRouter()\n\n\n# Get the inflation index and the cumulative inflation for the selected period and category\n@router.get(\n    path='/inflacion/',\n    status_code=status.HTTP_200_OK,\n    summary=\"Get information about inflation by date and category (base=2016)\",\n    tags=[\"Inflation\"]\n    )\nasync def get_inflation_by_date(\n    inflation_type:str = Query(...,\n                        title=\"Inflation Type\",\n                        description=\"Enter a category listed in the database: Nivel general, Servicios, Núcleo, etc\", \n                        min_length=1,\n                        example=\"Nivel general\"),\n    date_from:str = Query(...,\n                        title=\"date from\",\n                        description=\"Enter the date from which to run the query\", \n                        min_length=1,\n                        example=\"2022-01-01\"),\n    date_to:str = Query(...,\n                        title=\"date to\",\n                        description=\"Enter the date up to which to run the query\", \n                        min_length=1,\n                        example=\"2023-02-01\") \n    ):\n    \"\"\"\n    Get the information for the selected category, filtered by date\n    \"\"\" \n    df = get_inflation(inflation_type)\n    df = df.query(f\"Date >= '{date_from}' and Date <= '{date_to}'\")\n    for col in df.columns[1:]:\n        df[f\"Cumulative Inflation-{col.title()}\"] = ((1 + (df[col].pct_change())).cumprod()-1)*100\n    df = df[~df[df.columns[0]].isnull()]\n    if len(df)!=0:\n        js = json.loads(df.to_json(orient = 'records'))\n        return js\n    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\"Category not found\")\n","repo_name":"eze2286/FastAPI-proyecto-inflation-dolar-asset","sub_path":"routers/inflation.py","file_name":"inflation.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"72912309415","text":"import argparse\nimport json\nfrom pathlib import Path\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\n\nimport tensorrt_llm\nimport tensorrt_llm.profiler as profiler\nfrom tensorrt_llm.logger import logger\n\nfrom build import get_engine_name  # isort:skip\nfrom whisper_utils import load_audio, pad_or_trim, log_mel_spectrogram\n\nfrom encoding import WhisperEncoding\nfrom decoding import WhisperDecoding\n\nfrom torch_model import ModelDimensions, Whisper\n\nfrom normalizers import EnglishTextNormalizer\n\nfrom tqdm.notebook import tqdm\n\nimport pandas as pd\nimport jiwer\n\nimport string\nimport re\n\ndef eval_tensorrt_llm(whisper_encoding, whisper_decoding, mel):\n    audio_features = whisper_encoding.get_audio_features(mel)\n    \n    languages, language_probs = whisper_decoding.detect_language(audio_features)\n\n    tokens, sum_logprobs, no_speech_probs = whisper_decoding.main_loop(audio_features)\n\n    result = whisper_decoding.post_process(tokens, sum_logprobs, no_speech_probs, audio_features, languages)\n    \n    result = result[0]\n    return result\n\ndef eval_torch(whisper_encoding, whisper_decoding, mel, model):\n\n    audio_features = whisper_encoding.torch_get_audio_features(model, mel)\n    \n    languages, language_probs = whisper_decoding.torch_detect_language(model, audio_features)\n\n    tokens, sum_logprobs, no_speech_probs = whisper_decoding.torch_main_loop(model, audio_features)\n\n    result = whisper_decoding.post_process(tokens, sum_logprobs, no_speech_probs, audio_features, languages)\n    \n    result = result[0]\n    return result\n\ndef load_dataset(dataset_dir):\n    label_file = None\n    audio_file = []\n    for file in dataset_dir.iterdir():\n        if 
str(file).endswith('txt'):\n label_file = file\n else:\n audio_file.append(file)\n \n references = []\n with open(label_file, 'r') as f:\n for line in f:\n references.append((str(line).split(' ', 1))[1].replace('\\n', ''))\n \n return audio_file, references\n\ndef main(args):\n tensorrt_llm.logger.set_level(args.log_level)\n \n test_trt_llm = args.test_trt_llm\n test_torch = args.test_torch\n \n checkpoint = torch.load(args.checkpoint_file)\n model_metadata = OrderedDict(checkpoint['dims'])\n\n dims = ModelDimensions(**checkpoint[\"dims\"])\n model = Whisper(dims)\n model.load_state_dict(checkpoint[\"model_state_dict\"])\n model = model.to('cuda')\n \n del checkpoint\n\n world_size = 1\n runtime_rank = tensorrt_llm.mpi_rank()\n runtime_mapping = tensorrt_llm.Mapping(world_size, runtime_rank)\n torch.cuda.set_device(runtime_rank % runtime_mapping.gpus_per_node)\n\n model_metadata.update({'n_audio': 1})\n\n engine_dir = Path(args.engine_dir)\n\n whisper_encoding = WhisperEncoding(\n engine_dir,\n )\n\n whisper_decoding = WhisperDecoding(\n engine_dir,\n )\n output_torch = []\n output_tensorrt_llm = []\n output_reference = []\n\n dataset_dir = Path(args.dataset_dir)\n for dir in dataset_dir.iterdir():\n for child_dir in dir.iterdir():\n \n audio_files, references = load_dataset(child_dir)\n audio_files = sorted(audio_files)\n \n for i, audio_file in enumerate(audio_files):\n \n audio = load_audio(audio_file)\n if audio.shape[-1] > 480000:\n continue\n audio = pad_or_trim(audio)\n mel = log_mel_spectrogram(audio).to('cuda').type(torch.float16)\n mel = mel.unsqueeze(0)\n output_reference.append(references[i])\n \n if test_torch: \n profiler.start('torch')\n result = eval_torch(whisper_encoding, whisper_decoding, mel, model)\n profiler.stop('torch')\n punctuations = re.findall(r'[.,!?]', result.text) \n table = str.maketrans({p: '' for p in punctuations})\n result = result.text.translate(table).upper()\n output_torch.append(result)\n logger.info(\n \"---------------------------------------------------------\")\n logger.info(\"Torch Generated : \")\n logger.info(f\" Input : {audio_file}\")\n logger.info(f\"\\n Reference : {references[i]}\")\n logger.info(f\"\\n Output : {result}\")\n logger.info(\n \"---------------------------------------------------------\")\n\n if test_trt_llm: \n profiler.start('tensorrt_llm')\n result = eval_tensorrt_llm(whisper_encoding, whisper_decoding, mel)\n profiler.stop('tensorrt_llm')\n punctuations = re.findall(r'[.,!?]', result.text) \n table = str.maketrans({p: '' for p in punctuations})\n result = result.text.translate(table).upper()\n output_tensorrt_llm.append(result)\n logger.info(\n \"---------------------------------------------------------\")\n logger.info(\"TensorRT-LLM Generated : \")\n logger.info(f\" Input : {audio_file}\")\n logger.info(f\"\\n Reference : {references[i]}\")\n logger.info(f\"\\n Output : {result}\")\n logger.info(\n \"---------------------------------------------------------\")\n \n normalizer = EnglishTextNormalizer()\n \n if test_torch:\n data = pd.DataFrame(dict(hypothesis=output_torch, reference=output_reference))\n data[\"hypothesis_clean\"] = [normalizer(text) for text in data[\"hypothesis\"]]\n data[\"reference_clean\"] = [normalizer(text) for text in data[\"reference\"]]\n wer = jiwer.wer(list(data[\"reference_clean\"]), list(data[\"hypothesis_clean\"]))\n logger.info(\n f'Torch (total latency: {profiler.elapsed_time_in_sec(\"torch\")} sec)'\n )\n logger.info(f\"Torch beam 0 result\")\n logger.info(f\"\\nWER: {wer * 100:.2f} 
%\")\n\n if test_trt_llm:\n data = pd.DataFrame(dict(hypothesis=output_tensorrt_llm, reference=output_reference))\n data[\"hypothesis_clean\"] = [normalizer(text) for text in data[\"hypothesis\"]]\n data[\"reference_clean\"] = [normalizer(text) for text in data[\"reference\"]]\n wer = jiwer.wer(list(data[\"reference_clean\"]), list(data[\"hypothesis_clean\"]))\n logger.info(\n f'TensorRT-LLM (total latency: {profiler.elapsed_time_in_sec(\"tensorrt_llm\")} sec)'\n )\n logger.info(f\"TensorRT-LLM beam 0 result\")\n logger.info(f\"\\nWER: {wer * 100:.2f} %\")\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_torch', action='store_true')\n parser.add_argument('--test_trt_llm', action='store_true')\n parser.add_argument('--data_type',\n type=str,\n choices=['fp16'],\n default='fp16')\n parser.add_argument('--log_level', type=str, default='info')\n parser.add_argument('--engine_dir', type=str, default='whisper_outputs')\n parser.add_argument('--dataset_dir', type=str, default='./LibriSpeech/test-clean')\n parser.add_argument('--checkpoint_file', type=str, default='./large-v2.pt')\n args = parser.parse_args()\n main(args)\n","repo_name":"Eddie-Wang1120/Eddie-Wang-Hackathon2023","sub_path":"tensorrt_llm_july-release-v1/examples/whisper/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":7435,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"9"} +{"seq_id":"30651220956","text":"def _workspace_binary_script_impl(ctx):\n content = \"\"\"#!/usr/bin/env bash\nset -o errexit\nset -o nounset\nset -o pipefail\n\nif [[ -n \"${{BUILD_WORKSPACE_DIRECTORY:-}}\" ]]; then\n # Running from inside bazel\n cd \"${{BUILD_WORKSPACE_DIRECTORY}}\"\nelse\n # Running from bazel-bin\n cd \"$(git rev-parse --show-toplevel)\"\nfi\n# bazel-repo-infra will handle both external and local binaries, aka\n# bazel-repo-infra/external/go_sdk/bin/go\n# bazel-repo-infra/bazel-out/k8-fastbuild/bin/cmd/kazel/linux_amd64_stripped/kazel\n\"bazel-${{PWD##*/}}/{cmd}\" \"$@\"\n\"\"\".format(\n cmd = ctx.file.cmd.path,\n )\n ctx.actions.write(\n output = ctx.outputs.executable,\n content = content,\n is_executable = True,\n )\n runfiles = ctx.runfiles(\n files = [\n ctx.file.cmd,\n ],\n )\n return [DefaultInfo(runfiles = runfiles)]\n\n_workspace_binary_script = rule(\n attrs = {\n \"cmd\": attr.label(\n mandatory = True,\n allow_single_file = True,\n ),\n },\n executable = True,\n implementation = _workspace_binary_script_impl,\n)\n\ndef workspace_binary(\n name,\n cmd,\n args = None,\n visibility = None):\n \"\"\"Wraps a binary to be run in the workspace root via bazel run.\n\n For example, one might do something like\n\n workspace_binary(\n name = \"dep\",\n cmd = \"//vendor/github.com/golang/dep/cmd/dep\",\n )\n which would allow running dep with bazel run.\n \"\"\"\n script_name = name + \"_script\"\n _workspace_binary_script(\n name = script_name,\n cmd = cmd,\n tags = [\"manual\"],\n )\n native.sh_binary(\n name = name,\n srcs = [\":\" + script_name],\n args = args,\n visibility = visibility,\n tags = [\"manual\"],\n )\n","repo_name":"kubernetes-retired/kubefed","sub_path":"third-party/k8s.io/repo-infra/defs/run_in_workspace.bzl","file_name":"run_in_workspace.bzl","file_ext":"bzl","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","stars":2484,"dataset":"github-code","pt":"9"} +{"seq_id":"21428835635","text":"import torch\nfrom torch.nn import functional as F\nfrom torch.autograd import 
Variable\nfrom torch.utils.data import DataLoader\nfrom datetime import datetime\nfrom dataset import CamvidDataset\nfrom evalution_segmentation import eval_semantic_segmentation\nfrom FCN import FCN\nimport cfg\n\n\n# run on the GPU if one is available, otherwise run on the CPU\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(device)\n\nmiou_list = [0]\n\n\n# load the test set\nCam_test = CamvidDataset([cfg.TEST_ROOT, cfg.TEST_LABEL], cfg.crop_size)\ntest_data = DataLoader(Cam_test, batch_size=cfg.BATCH_SIZE, shuffle=False, num_workers=0)\n\nnet = FCN(12)\n\nnet.to(device)\nnet.load_state_dict(torch.load('train_moiu = 0.32850_1.pth'))\n\ntrain_acc = 0\ntrain_miou = 0\ntrain_class_acc = 0\ntrain_mpa = 0\nerror = 0\n\n\nfor i, sample in enumerate(test_data):\n    img = Variable(sample[\"img\"].to(device))\n    label = Variable(sample[\"label\"].to(device))\n\n    out = net(img)\n    out = F.log_softmax(out, dim=1)\n\n    # take the index of the maximum value\n    pre_label = out.max(dim=1)[1].data.cpu().numpy()\n    pre_label = [i for i in pre_label]\n\n    true_label = label.data.cpu().numpy()\n    true_label = [i for i in true_label]\n\n    # compute the confusion matrix\n    eval_metric = eval_semantic_segmentation(pre_label, true_label)\n\n    train_acc += eval_metric[\"mean_class_accuracy\"]\n    train_miou += eval_metric[\"miou\"]\n\n    # per-class accuracy\n    train_mpa += eval_metric[\"pixel_accuracy\"]\n\n    if len(eval_metric['class_accuracy']) < 12:\n        eval_metric['class_accuracy'] = 0\n        train_class_acc += eval_metric['class_accuracy']\n        error += 1\n    else:\n        train_class_acc += eval_metric['class_accuracy']\n\n    # print(eval_metric['class_accuracy'], \"================\", i)\n\n\n\n# result metrics for one full pass over the test set\nepoch_str = 'Test Acc : {:.5f} Test Mpa : {:.5f} Test Mean : {:.5f} \\n Test class acc : {:}'.format(\n    train_acc / len(test_data) - error,\n    train_miou / len(test_data) - error,\n    train_mpa / len(test_data) - error,\n    train_class_acc / len(test_data) - error\n)\n\nif train_miou / len(test_data) - error > max(miou_list):\n    miou_list.append(train_miou / (len(test_data) - error))\n    print(\"max miou list = \" , miou_list)\n    print(epoch_str + \"=============\")","repo_name":"Jerry365/FCN_shenduzhiyan","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"9"} +{"seq_id":"8848461501","text":"import logging\nimport pytest\nfrom Shared.API.secret import set_users_effective_permissions, del_secret\n\nlogger = logging.getLogger('test')\n\n\n@pytest.mark.api\n@pytest.mark.pas\n@pytest.mark.pasapi\n@pytest.mark.bhavna\ndef test_need_delete_permission_on_secret_to_be_able_to_delete_it(core_session,\n                                                                 added_secrets,\n                                                                 added_secrets_file,\n                                                                 users_and_roles):\n    \"\"\"\n    test method to delete a secret for User A with/without \"DELETE\" permissions enabled for User A\n    1) with \"DELETE\" member permissions enabled for User A, verify secret should be deleted.\n    2) without \"DELETE\" member permissions enabled for User A, verify secret should not be deleted.\n    :param core_session: Authenticated Centrify Session.\n    :param added_secrets: Fixture to add text type secrets & yields secret related details\n    :param added_secrets_file: Fixture to add file type secrets & yields secret related details\n    :param users_and_roles: Fixture to create New user with PAS Power Rights\n    \"\"\"\n    secret_id_list, secret_name = added_secrets\n    added_file_secret_id = added_secrets_file\n    pas_power_user = users_and_roles.get_user('Privileged Access Service Power User')\n    user_name = pas_power_user.get_login_name()\n    user_id = 
pas_power_user.get_id()\n pas_power_user_session = users_and_roles.get_session_for_user('Privileged Access Service Power User')\n assert pas_power_user_session.auth_details, 'Failed to Login with PAS Power User'\n logger.info(f'User with PAS Power User Rights login successfully :user_Name: {user_name}'\n f' & Password: {pas_power_user.get_password()} ')\n\n # Api to set permissions(DELETE) for User A\n text_type_secret_result, text_type_secret_success = set_users_effective_permissions(core_session,\n user_name,\n 'View,Grant,Delete,Retrieve',\n user_id,\n secret_id_list[0])\n assert text_type_secret_success, f'Failed to set permissions for text type secret:{text_type_secret_result}'\n logger.info(f'setting permissions for text type secret: {text_type_secret_success}')\n\n # Api to delete the secret with DELETE permissions\n del_success, del_result = del_secret(pas_power_user_session, secret_id_list[0])\n assert del_success, f'Not Able to delete the child secret: {del_result}'\n logger.info(f'Able to delete the child secret:{del_result}')\n for secret_id in secret_id_list:\n secret_id_list.remove(secret_id)\n logger.info(f'Successfully Deleted secrets with secret name {secret_name}')\n\n # Api to set permissions(without DELETE) for User A\n text_type_secret_result, text_type_secret_success = set_users_effective_permissions(core_session,\n user_name,\n 'View,Grant,Retrieve',\n user_id,\n added_file_secret_id)\n assert text_type_secret_success, f'Failed to set permissions for text type secret:{text_type_secret_result}'\n logger.info(f'setting permissions for text type secret: {text_type_secret_success}')\n\n # Api to delete the secret without DELETE permissions\n del_success, del_result = del_secret(pas_power_user_session, added_file_secret_id)\n assert del_success is False, f'Able to delete the child secret: {del_result}'\n logger.info(f'Able to delete the child secret:{del_result}{del_success}')\n","repo_name":"jaspalsingh92/TestAutomation-1","sub_path":"framework/Tests/PAS/PAS/GeneralSecrets/SecretsV2/Secrets/DeleteSecrets/API/test_need_delete_permission_on_secret_to_be_able_to_delete_it.py","file_name":"test_need_delete_permission_on_secret_to_be_able_to_delete_it.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"14509972759","text":"def split_words(self, s):\n s = self.strip_words(s)\n if len(s) == 0:\n return []\n\n res = []\n start = 0\n end = len(s) - 1\n j = 0\n\n while start <= end:\n while j <= end and s[j] != ' ':\n j += 1\n\n if len(s[start : j]) >= 1:\n res.append(s[start : j])\n\n if j <= end: # since s is already striped, so there is no leading and trailing spaces, so if j != end, then there must be some more words ahead.\n while s[j] == ' ': # ignore the spaces between words\n j += 1\n\n start = j\n\n return res","repo_name":"xy008areshsu/Leetcode_complete","sub_path":"python_version/split_words.py","file_name":"split_words.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"8042276635","text":"# Code to implement the correlation funciton evolution (Redfield) dynamics of the system consisting of fermions\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom qutip import *\nfrom scipy import integrate\n\n\n\n#declaring parameters\nw0=1\nbeta=0.8\nmu=-2.5\nepsilon=0.1\nmu1=mu\nmu2=mu\nbeta1=beta\nbeta2=beta\nGamma1=1\nGamma2=4\nGamma=[]\nGamma.append(Gamma1) 
#Gamma1=1\nGamma.append(Gamma2) #Gamma2=4\ntb=1\n\n\n\n\ndef spectral_bath(omega,Gamma, tb): #Define the spectral bath function\n if (omega <= -2*tb):\n return 0\n if (omega >= 2*tb):\n return 0\n return Gamma*np.sqrt(1-(omega*omega)/(4*tb*tb))\n\n\ndef nbar(omega, beta, mu): # Define the fermionic occupation number\n return 1/(np.exp(beta*(omega-mu))+1)\n\n\ndef intefunc1(omega,Gamma,tb,beta,mu): # Define spectral_bath*nbar, a function required for computing the evolution.\n if (omega <= -2*tb):\n return 0\n if (omega >= 2*tb):\n return 0\n return spectral_bath(omega,Gamma,tb)*nbar(omega,beta,mu)\n\n\n\ndef evolfunc(state,M,O): #Evolve \n return M@state+O\n\n\ndef f(omega,c,alpha,gamma,Gamma,tb): # computes the f_(\\alpha,\\gamma) defined in the paper.\n f=0\n f=f+0.5*c[0,alpha].conj()*c[0,gamma]*spectral_bath(omega,Gamma[0],tb)\n f=f+0.5*c[1,alpha].conj()*c[1,gamma]*spectral_bath(omega,Gamma[1],tb)\n return f\n\ndef F(omega,c,alpha,gamma,Gamma,tb,beta1,beta2,mu1,mu2): # computes the F_(\\alpha,\\gamma) defined in the paper.\n f=0\n F=0\n F=F+0.5*c[0,alpha].conj()*c[0,gamma]*intefunc1(omega,Gamma[0],tb,beta1,mu1)\n F=F+0.5*c[1,alpha].conj()*c[1,gamma]*intefunc1(omega,Gamma[1],tb,beta2,mu2)\n return F\n\n\nglist=[0.1] #Define the g values for which you wish to compute time evolution. \n#glist=np.linspace(0.0,0.5,27)\n\n\nfinallist=[] #optional (See end of code)\n\nc=np.empty((2,2),dtype=np.double) # c matrix as defined in the paper. Converts the eigenmode operators to the original site operators\n\n\nc[0,0]=1/np.sqrt(2)\nc[0,1]=1/np.sqrt(2)\nc[1,0]=-1/np.sqrt(2)\nc[1,1]=1/np.sqrt(2)\n\n\n\nfor g in glist:\n print(\"gvalue is \",g)\n\n w=[]\n\n w.append(w0-g)\n w.append(w0+g) #stores eigenfrequencies of the system.\n\n \n pvaluej=np.empty((2,2),dtype=np.cdouble) #stores Cauchy P value of J \n pvaluejn=np.empty((2,2),dtype=np.cdouble) #stores Cauchy of J*n\n\n pvaluejn[0,0]=(-1.0j/(2*np.pi))*integrate.quad(intefunc1,-2*tb,2*tb,args=(Gamma1,tb,beta1,mu1),weight='cauchy',wvar=w[0])[0] #bath1, w1\n pvaluejn[0,1]=(-1.0j/(2*np.pi))*integrate.quad(intefunc1,-2*tb,2*tb,args=(Gamma1,tb,beta1,mu1),weight='cauchy',wvar=w[1])[0] #bath1,w2\n pvaluejn[1,0]=(-1.0j/(2*np.pi))*integrate.quad(intefunc1,-2*tb,2*tb,args=(Gamma2,tb,beta2,mu2),weight='cauchy',wvar=w[0])[0] #bath2, w1\n pvaluejn[1,1]=(-1.0j/(2*np.pi))*integrate.quad(intefunc1,-2*tb,2*tb,args=(Gamma2,tb,beta2,mu2),weight='cauchy',wvar=w[1])[0]\n\n \n pvaluej[0,0]=(-1.0j/(2*np.pi))*integrate.quad(spectral_bath,-2*tb,2*tb,args=(Gamma1,tb),weight='cauchy',wvar=w[0])[0] #bath1, w1\n pvaluej[0,1]=(-1.0j/(2*np.pi))*integrate.quad(spectral_bath,-2*tb,2*tb,args=(Gamma1,tb),weight='cauchy',wvar=w[1])[0] #bath1,w2\n pvaluej[1,0]=(-1.0j/(2*np.pi))*integrate.quad(spectral_bath,-2*tb,2*tb,args=(Gamma2,tb),weight='cauchy',wvar=w[0])[0] #bath2, w1\n pvaluej[1,1]=(-1.0j/(2*np.pi))*integrate.quad(spectral_bath,-2*tb,2*tb,args=(Gamma2,tb),weight='cauchy',wvar=w[1])[0]\n \n # we have computed the pvalues that are necessary to com\n # Note that we have included the \"2\" and \"-1\" factor in integrals above.\n \n \n #first 2 indices are alpha, gamma, last index is w1 or w2. (Some of these entries are not necessary)\n # ie, f_matrix(alpha,gamma,w1)=f_{alpha,gamma} (w1) as defined in the paper.\n \n f_matrix=np.empty((2,2,2),dtype=np.cdouble) \n F_matrix=np.empty((2,2,2),dtype=np.cdouble)\n f_deltamatrix=np.empty((2,2,2),dtype=np.cdouble)\n F_deltamatrix=np.empty((2,2,2),dtype=np.cdouble)\n \n # We now compute the correlation function evolution. 
\n    # We let C[0]=C_11, C[1]=C_12, C[2]=C_21, C[3]=C_22\n    # Therefore, the correlation function evolution is given by dC/dt=M*C+O. We construct M and O.\n    \n    for alpha in range(2):\n        for gamma in range(2):\n            f_matrix[alpha,gamma,0]=f(w[0],c,alpha,gamma,Gamma,tb)\n            f_matrix[alpha,gamma,1]=f(w[1],c,alpha,gamma,Gamma,tb)\n            F_matrix[alpha,gamma,0]=F(w[0],c,alpha,gamma,Gamma,tb,beta1,beta2,mu1,mu2)\n            F_matrix[alpha,gamma,1]=F(w[1],c,alpha,gamma,Gamma,tb,beta1,beta2,mu1,mu2)\n            \n            f_deltamatrix[alpha,gamma,0]=(-1/np.pi)*integrate.quad(f,-2*tb,2*tb,args=(c,alpha,gamma,Gamma,tb),weight='cauchy',wvar=w[0])[0]\n            f_deltamatrix[alpha,gamma,1]=(-1/np.pi)*integrate.quad(f,-2*tb,2*tb,args=(c,alpha,gamma,Gamma,tb),weight=\"cauchy\",wvar=w[1])[0]\n            F_deltamatrix[alpha,gamma,0]=(-1/np.pi)*integrate.quad(F,-2*tb,2*tb,args=(c,alpha,gamma,Gamma,tb,beta1,beta2,mu1,mu2),weight=\"cauchy\",wvar=w[0])[0]\n            F_deltamatrix[alpha,gamma,1]=(-1/np.pi)*integrate.quad(F,-2*tb,2*tb,args=(c,alpha,gamma,Gamma,tb,beta1,beta2,mu1,mu2),weight=\"cauchy\",wvar=w[1])[0]\n    \n    \n    \n    M=np.empty((4,4),dtype=np.cdouble)\n    O=np.empty((4,1),dtype=np.cdouble)\n    \n    #Construction done.\n\n    O[0]=2*epsilon*epsilon*F_matrix[0,0,0]\n    M[0,0]=-2*epsilon*epsilon*f_matrix[0,0,0]\n    M[0,1]=-epsilon*epsilon*(f_matrix[0,1,1]+1.0j*f_deltamatrix[0,1,1])\n    M[0,2]=-epsilon*epsilon*(f_matrix[0,1,1]-1.0j*f_deltamatrix[0,1,1])\n    M[0,3]=0\n    \n    #C11 done.\n    \n    O[1]=epsilon*epsilon*(F_matrix[1,0,0]+F_matrix[0,1,1]+1.0j*F_deltamatrix[1,0,0]-1.0j*F_deltamatrix[0,1,1])\n    M[1,0]=-epsilon*epsilon*(f_matrix[1,0,0]+1.0j*f_deltamatrix[1,0,0])\n    M[1,1]=1.0j*w[0]-1.0j*w[1] -epsilon*epsilon*(f_matrix[0,0,0]+f_matrix[1,1,1]+1.0j*f_deltamatrix[1,1,1]-1.0j*f_deltamatrix[0,0,0])\n    M[1,2]=0\n    M[1,3]=-epsilon*epsilon*(f_matrix[0,1,1]-1.0j*f_deltamatrix[0,1,1])\n    \n    #C12 done\n    \n    O[2]=epsilon*epsilon*(F_matrix[0,1,1]+F_matrix[1,0,0]+1.0j*F_deltamatrix[0,1,1]-1.0j*F_deltamatrix[1,0,0])\n    M[2,0]=-epsilon*epsilon*(f_matrix[1,0,0]-1.0j*f_deltamatrix[1,0,0])\n    M[2,1]=0\n    M[2,2]=1.0j*w[1]-1.0j*w[0]-epsilon*epsilon*(f_matrix[0,0,0]+f_matrix[1,1,1]-1.0j*f_deltamatrix[1,1,1]+1.0j*f_deltamatrix[0,0,0])\n    M[2,3]=-epsilon*epsilon*(f_matrix[0,1,1]+1.0j*f_deltamatrix[0,1,1])\n    \n    #C21 Done\n    \n    O[3]=2*epsilon*epsilon*F_matrix[1,1,1]\n    M[3,0]=0\n    M[3,1]=-epsilon*epsilon*(f_matrix[1,0,0]-1.0j*f_deltamatrix[1,0,0])\n    M[3,2]=-epsilon*epsilon*(f_matrix[1,0,0]+1.0j*f_deltamatrix[1,0,0])\n    M[3,3]=-2*epsilon*epsilon*f_matrix[1,1,1]\n    \n    #C22 done\n    \n    \n    #implement RK4 evolution.\n    initial=np.zeros((4,1),np.cdouble) #Initial values of the correlators.\n    store=[] #stores the value of the correlations.\n    \n    h=0.2 #step size of RK4\n    tmax=500 #max time\n    steps=int(tmax/h +1)\n    \n    store.append(initial)\n    N1=[] #Stores occupation number of the first site.\n    N1.append(initial[0])\n    N2=[] #Stores occupation number of the second site.\n    N2.append(initial[3])\n    \n    \n    tlist=np.linspace(0,tmax,steps)\n    for k in range(steps-1):\n        wi=store[k]\n        k1=h*evolfunc(wi,M,O)\n        k2=h*evolfunc(wi+0.5*k1,M,O)\n        k3=h*evolfunc(wi+0.5*k2,M,O)\n        k4=h*evolfunc(wi+k3,M,O)\n        wi_1=wi+(k1+2*k2+2*k3+k4)/6\n        \n        store.append(wi_1)\n        #if (k%100==0):\n        #    print (\"k=\",k)\n        N1.append(wi_1[0]) #update occupation numbers\n        N2.append(wi_1[3])\n        \n    #plot the occupation numbers.\n    plt.plot(tlist,N1,label=\"N1\")\n    plt.plot(tlist,N2,label=\"N2\")\n    plt.legend()\n    plt.show()\n    #finallist.append(1.0j*g*(-wi_1[2]+wi_1[1]))\n#plt.plot(tlist,blah)\n    \n#plt.plot(glist,finallist)\n\n    \n    \n    \n    
\n","repo_name":"dtupkary/open-quantum-systems-project","sub_path":"correlations_fermions.py","file_name":"correlations_fermions.py","file_ext":"py","file_size_in_byte":7760,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"71840183327","text":"from abc import ABC\nclass Inventory(ABC):\n id=0\n raw_data=[]\n\n @classmethod\n def getid(cls):\n cls.id+=1\n return cls.id\n @classmethod\n def getdata(cls):\n return cls.raw_data\n \n def __init__(self,name,price,quantity,category):\n \n self.id=self.getid()\n self.name=name\n self.price=price\n self.quantity=quantity\n self.category=category \n\n def add_item(self):\n self.getdata().append({'id':self.id,'name':self.name,'price':self.price,'quantity':self.quantity,'category':self.category})\n \n\n\n\n \n def view_all_item(self):\n g=False\n for item in self.getdata():\n print(\"id\",item['id'],\"Name\",item['name'],\"price:\",item['price'],\"quantity:\",item['quantity'],\"category:\",item['category']) \n g=True\n if g == False:\n print(\"no data found\") \n\n def report_item(self):\n try: \n \n t=False\n for item in self.getdata():\n if item['quantity'] < 10:\n print(\"id\",item['id'],\"Name\",item['name'],\"price:\",item['price'],\"quantity:\",item['quantity'],\"category:\",item['category']) \n t=True\n if t==True:\n print(\"these are the product with the minimum quantity\")\n \n if t == False:\n print(\"quantity less 10 not available\") \n except Exception as e:\n print(e) \n\n def update_item_info(self):\n try:\n id= int(input(\"enter the id of product:\"))\n t=False\n for item in self.getdata():\n if item['id'] == id:\n print(\"your product name :\",item['name'])\n price=int(input('enter new price :')) \n quantity=int(input('enter the new quantity:'))\n category=input(\"enter the category of product:\")\n item['price']=price\n item['quantity']=quantity\n item['category']=category\n t=True\n if t ==False: \n print(\"id number not found\") \n except Exception as e:\n print(e) \n\n def del_item(self):\n try:\n id= int(input(\"enter the id of item:\"))\n for item in self.getdata():\n if item['id'] == id:\n self.getdata().remove(item)\n print(self.getdata())\n t=True \n if t == False:\n print(\"id number not found\") \n\n except Exception as e:\n print(e) \n\nclass Electronic(Inventory):\n def __init__(self, name, price, quantity, category):\n super().__init__(name, price, quantity, category)\n\n\ng=0\n\nwhile g==0:\n def exit():\n choice=input(\"-Are you sure want to exit?(Y/N)\") \n if choice == 'Y' or choice == 'y':\n print(\"-Thank you \")\n return 1\n \n print(\"---- Inventory management system ----\")\n print(\"add -> add item\")\n print(\"view -> view all item\")\n print(\"report -> look report of item\")\n print(\"update -> update item\")\n print(\"del -> delete item\")\n print(\"exit -> exit\")\n\n choice=input(\"enter your choice :\")\n \n\n if choice == 'add':\n try:\n name=input('enter the name of product:')\n price=float(input(\"enter the price of product:\"))\n quantity=int(input(\"enter the quantity of product\"))\n category=input(\"enter the name of category\") \n data=Electronic(name,price,quantity,category)\n data.add_item()\n print(id(data))\n print(Electronic.raw_data)\n except Exception as e:\n print(e)\n\n elif choice == 'view':\n data.view_all_item()\n print(id(data))\n elif choice == 'report':\n data.report_item() \n elif choice == 'update':\n data.update_item_info() \n elif choice == 'del':\n data.del_item() \n\n elif choice == 'exit':\n g=exit() \n else:\n 
print(\"please choose valid choice\") ","repo_name":"shahpankaj123/Intenship-Task","sub_path":"Day6_Task1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"4448869726","text":"#!/usr/bin/env python\n\nimport sys\nimport traceback\nimport os.path\nimport subprocess\nfrom optparse import OptionParser\n\nfrom all_plg_srcs import PLUGINS\n\n_OPER_INSTALL_NEW, _OPER_UPDATE_EXISTING = 0, 1\n\n\nclass _CmdMaker(object):\n \"\"\"Creates parameters for subprocess.call's\n for fetching/removal of plugins\n \"\"\"\n\n _COMMANDS = {\n # working dir | the command or commands\n 'git': {\n _OPER_INSTALL_NEW: [\n (\"%(PARENT_DIR)s\", \"git clone %(SRC)s %(PKGNAME)s\"),\n (\"%(PKG_PATH)s\", \"git submodule update --init --recursive\"),\n ],\n _OPER_UPDATE_EXISTING: [\n (\"%(PKG_PATH)s\", \"git pull origin\"),\n (\"%(PKG_PATH)s\", \"git submodule update --recursive\"),\n ],\n },\n 'wget': {\n _OPER_INSTALL_NEW: [\n (\"%(PARENT_DIR)s\", \"wget --no-check-certificate %(SRC)s -O %(PKGNAME)s\"),\n ],\n _OPER_UPDATE_EXISTING: [\n (\"%(PARENT_DIR)s\", \"wget --no-check-certificate %(SRC)s -O %(PKGNAME)s\"),\n ],\n },\n 'local': {\n _OPER_INSTALL_NEW: [\n (\"%(PARENT_DIR)s\", \"cp -r %(SRC)s %(PKGNAME)s\"),\n ],\n },\n }\n\n @classmethod\n def mk_dirs_and_cmds(cls, src_type, oper_id, vim_plugin):\n assert src_type in cls._COMMANDS, (\n \"Unknown plygin src_type: '%s'\" % src_type)\n opid2ops = cls._COMMANDS[src_type]\n assert oper_id in opid2ops, (\n \"Operation %s is not supported for '%s' plugins\" % (\n oper_id, src_type))\n dirs_and_cmds = opid2ops[oper_id]\n assert all(len(dir_and_cmd) == 2 for dir_and_cmd in dirs_and_cmds), (\n \"Each element of the list should be in form \"\n \"(dir_template, command_template): {}\".format(dirs_and_cmds))\n\n placeholders = cls._mk_placeholders(vim_plugin)\n cmds = [\n (\n dir_template % placeholders,\n [chunk % placeholders for chunk in cmd_template.split()]\n ) for dir_template, cmd_template in dirs_and_cmds]\n assert cmds\n return cmds\n\n @staticmethod\n def _mk_placeholders(vim_plugin):\n if vim_plugin.src_type == \"local\":\n src = os.path.join(vim_plugin.vim_dir, vim_plugin.src)\n else:\n src = vim_plugin.src\n\n return {\n \"SRC\": src,\n \"PARENT_DIR\": vim_plugin.parent_dir,\n \"PKG_PATH\": vim_plugin.pkg_path,\n \"PKGNAME\": vim_plugin.pkg_name\n }\n\n\nclass VimPlugin(object):\n def __init__(self, vim_dir, plug_id, **kwargs):\n self.vim_dir = vim_dir\n self.plug_id = plug_id\n self.descr = kwargs.pop('descr')\n self.src_type = kwargs.pop('src_type')\n self.src = kwargs.pop('src')\n self.is_dir = kwargs.pop('is_dir', True)\n self.destination = kwargs.pop('destination', \"bundle\")\n\n self.pkg_name = self._get_pkg_name()\n self.parent_dir = os.path.join(self.vim_dir, self.destination)\n self.pkg_path = os.path.join(self.parent_dir, self.pkg_name)\n self.prev_installed = os.path.exists(self.pkg_path)\n\n def update(self, new_only):\n is_success = False\n try:\n is_success = self._do_update(new_only)\n except Exception as e:\n print(traceback.format_exc())\n print(\"... operation failed\")\n return is_success\n\n\n def _do_update(self, new_only):\n print(\"===== Processing plugin '%s' ...\" % (self.descr, ))\n if self.prev_installed:\n if new_only:\n print(\"... skip, installed previously.\")\n else:\n if self._update_existing():\n print(\"... update failed.\")\n return False\n else:\n print(\"... 
updated successfully.\")\n else:\n if self._install_new():\n print(\"... installation failed.\")\n return False\n else:\n print(\"... done.\")\n return True\n\n def _install_new(self):\n self._make_tgt_dir()\n for wrk_dir, cmd in _CmdMaker.mk_dirs_and_cmds(self.src_type,\n _OPER_INSTALL_NEW,\n self):\n bk_cwd = os.getcwd()\n os.chdir(wrk_dir)\n exit_status = subprocess.call(cmd)\n os.chdir(bk_cwd)\n if exit_status:\n return exit_status\n return 0\n\n def _update_existing(self):\n for wrk_dir, cmd in _CmdMaker.mk_dirs_and_cmds(self.src_type,\n _OPER_UPDATE_EXISTING,\n self):\n bk_cwd = os.getcwd()\n os.chdir(wrk_dir)\n exit_status = subprocess.call(cmd)\n os.chdir(bk_cwd)\n if exit_status:\n return exit_status\n return 0\n\n def _get_pkg_name(self):\n pkg_name = os.path.basename(self.src)\n if self.is_dir:\n pkg_name = os.path.splitext(pkg_name)[0]\n return pkg_name\n\n def _make_tgt_dir(self):\n if not os.path.exists(self.parent_dir):\n os.mkdir(self.parent_dir)\n if not os.path.isdir(self.parent_dir):\n assert False\n\n\ndef mk_new_sel_plugs_file(fname):\n cfg_file = open(fname, \"w\")\n for plug_id, plug_props in PLUGINS.items():\n cfg_file.write(\"#%(PLG_ID)-20s # %(PLG_DESCR)s\\n\" % {\n 'PLG_ID': plug_id,\n 'PLG_DESCR': plug_props['descr']})\n print(\"File with a list of awailable plugins created:\")\n print(fname)\n print(\"Uncomment all the plugins you want to install\")\n\n\ndef get_selected_plugins_list(vim_dir):\n fname = os.path.join(vim_dir, 'sel_plugs.cfg')\n if not os.path.isfile(fname):\n mk_new_sel_plugs_file(fname)\n plugs_list = [s[:s.find('#')].strip() for s in open(fname)]\n plugs_list = [s for s in plugs_list if s]\n return plugs_list\n\n\ndef process_options():\n parser = OptionParser(usage=\"usage: %prog [options] [plugin_names]\")\n parser.add_option('-l', '--list', dest='print_list', default=False,\n action='store_true',\n help=\"print list of supported/installed plugins\")\n parser.add_option('-N', '--new_only', dest='new_only', default=False,\n action='store_true',\n help=\"install new plugins only, do not update existing\")\n\n return parser.parse_args()\n\n\ndef print_plugins_list(vim_dir, sel_plug_ids):\n all_plugins = [VimPlugin(vim_dir, id, **conf)\n for (id, conf) in PLUGINS.items()]\n for plugin in all_plugins:\n is_selected = plugin.plug_id in sel_plug_ids\n fmt = \"%(ID)15s %(SELECTED)12s %(INSTALLED)-12s %(DESCR)s\"\n statedescr = fmt % {\n 'ID': plugin.plug_id,\n 'SELECTED': \"selected\" if is_selected else \"\",\n 'INSTALLED': \"installed\" if plugin.prev_installed else \"\",\n 'DESCR': plugin.descr}\n print(statedescr)\n for plug_id in sel_plug_ids:\n if plug_id not in PLUGINS:\n print(\"WARNING: Unknown plugin '%s' is selected\" % (plug_id,))\n\n\ndef check_args_plugins_selected(plug_ids, sel_plug_ids):\n for plug_id in plug_ids:\n if plug_id not in sel_plug_ids:\n print(\"Plugin '%s' is not selected. 
Include it into\"\n \" '~/.vim/sel_plugs.cfg' file\")\n sys.exit(1)\n\n\ndef main():\n options, args = process_options()\n\n vim_dir = os.path.abspath(os.path.expanduser(\"~/.vim\"))\n sel_plug_ids = get_selected_plugins_list(vim_dir)\n\n if options.print_list:\n print_plugins_list(vim_dir, sel_plug_ids)\n return\n\n if args:\n check_args_plugins_selected(args, sel_plug_ids)\n\n plugs_to_process = args or sel_plug_ids\n\n if plugs_to_process:\n num_failed = 0\n for plugin_id in plugs_to_process:\n plg = VimPlugin(vim_dir, plugin_id, **PLUGINS[plugin_id])\n is_success = plg.update(options.new_only)\n if not is_success:\n num_failed += 1\n if num_failed:\n print(\"\\n=== Warning: %s of %s plugins failed ===\" % (\n num_failed, len(plugs_to_process)))\n return 1\n else:\n print(\"\\n=== Ok: %s plugins processed successfully ===\" % (\n len(plugs_to_process), ))\n return 0\n else:\n print(\"\\n=== Warning: No plugins selected. ===\")\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"akorshkov/dotvimconf","sub_path":"update_plugins.py","file_name":"update_plugins.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"18335171452","text":"import logging\n\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom flask import Flask\n\nfrom app_config import AppConfig\nfrom cdm_loader.cdm_message_processor_job import CdmMessageProcessor\nfrom cdm_loader.repository.cdm_repository import CdmRepository\n\n\napp = Flask(__name__)\n\n\n# Make endpoint to check if service is up\n@app.get('/')\ndef index():\n return 'service is working'\n\n\nif __name__ == '__main__':\n app.logger.setLevel(logging.DEBUG)\n\n # Init config. For convinience getting envs is placed in a separate class\n config = AppConfig()\n\n cdmRepository = CdmRepository(config.pg_warehouse_db())\n\n # Init messages processor, pass objects to constructor\n proc = CdmMessageProcessor(config.kafka_consumer(), \n cdmRepository, app.logger) \n\n # Run processor in background\n # BackgroundScheduler will run upon schedule function \"run\" of StgMessageProcessor.\n scheduler = BackgroundScheduler()\n scheduler.add_job(func=proc.run, trigger=\"interval\", seconds=AppConfig.DEFAULT_JOB_INTERVAL)\n scheduler.start()\n\n # start Flask app to keep service running\n app.run(debug=False, host='0.0.0.0', use_reloader=False)\n","repo_name":"SergeySenigov/de-project-sprint-9","sub_path":"solution/service_cdm/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"1923766727","text":"# -*- coding: utf-8 -*-\n\nfrom random import randint\n\n# Доработать практическую часть урока lesson_007/python_snippets/08_practice.py\n\n# Необходимо создать класс кота. 
У кота есть аттрибуты - сытость и дом (в котором он живет).\n# Кот живет с человеком в доме.\n# Для кота дом характеризируется - миской для еды и грязью.\n# Изначально в доме нет еды для кота и нет грязи.\n\n# Доработать класс человека, добавив методы\n# подобрать кота - у кота появляется дом.\n# купить коту еды - кошачья еда в доме увеличивается на 50, деньги уменьшаются на 50.\n# убраться в доме - степень грязи в доме уменьшается на 100, сытость у человека уменьшается на 20.\n# Увеличить кол-во зарабатываемых человеком денег до 150 (он выучил пайтон и устроился на хорошую работу :)\n\n# Кот может есть, спать и драть обои - необходимо реализовать соответствующие методы.\n# Когда кот спит - сытость уменьшается на 10\n# Когда кот ест - сытость увеличивается на 20, кошачья еда в доме уменьшается на 10.\n# Когда кот дерет обои - сытость уменьшается на 10, степень грязи в доме увеличивается на 5\n# Если степень сытости < 0, кот умирает.\n# Так же надо реализовать метод \"действуй\" для кота, в котором он принимает решение\n# что будет делать сегодня\n\n# Человеку и коту надо вместе прожить 365 дней.\n\nfrom random import randint\nfrom termcolor import cprint\n\nclass Men:\n\n def __init__(self):\n self.name = 'Victor'\n self.fullness = 50\n self.house = None\n self.cat = None\n\n def __str__(self):\n return 'I am {}, fullnes is {}'.format(\n self.name,self.fullness)\n\n def go_to_clean_shop(self):\n self.house.money-=10\n cprint(f'clean shop for {self.cat}',color='blue')\n self.house.cat_duty-=30\n\n def cat_food(self):\n self.cat.cat_fullness+=10\n self.house.money-=10\n print('cat was eating')\n\n\n def eat (self):\n\n if self.house.food > 10:\n print ('{} eaten'.format(self.name) )\n self.fullness+=10\n self.house.food-=10\n else:\n print('{} without eat'.format(self.name))\n\n def work (self):\n print ('{} go to job'.format(self.name))\n self.house.money+=50\n self.fullness-=10\n\n def watch_tv (self):\n cprint ('{} watch MTV all day'.format(self.name), color='red')\n self.fullness-=20\n\n def shop(self):\n self.house.cat_food+=50\n self.house.money-=10\n if self.house.money>=50:\n print('{} go to shop'.format(self.name))\n self.house.money-=10\n self.house.food+=20\n else:\n cprint('NOT Money')\n\n def go_into_house(self, house):\n self.house = house\n self.fullness-=10\n print(f'{self.name} - moving to the house')\n\n def go_to_cat(self, cat):\n\n self.cat = cat\n print(f'{self.name} - take the cat {self.cat.cat_name}')\n\n def act(self):\n\n if self.house.food<=0:\n self.eat()\n\n if self.house.money<=0:\n self.work()\n dice = randint(1, 6)\n if self.fullness<=10:\n self.shop()\n self.eat()\n\n elif dice ==1:\n self.work()\n elif dice ==2:\n self.eat()\n else:\n self.watch_tv()\n\n\nclass House:\n\n def __init__(self):\n self.food = 50\n self.money = 100\n self.cat_food = 0\n self.cat_duty = 0\n def __str__(self):\n return 'food have {}, money is {}, cat-food have {}, cat-duty is {}'.format(\n self.food,self.money, self.cat_food, self.cat_duty)\n\nclass Cat:\n def __init__(self):\n self.cat_fullness = 50\n self.cat_name = 'marsik'\n self.house = None\n self.men = None\n\n def __str__(self):\n return f'Cat: {self.cat_name} have cat-food: {self.house.cat_food} and cat-fullness {self.cat_fullness}'\n\n def cat_food(self):\n self.men.work()\n self.house.cat_food-=20\n self.cat_fullness+=30\n\n def cat_fullness(self):\n if self.cat_fullness<= 10:\n self.men.cat_food()\n self.house.cat_food-=10\n\n def cat_play(self):\n self.house.cat_duty+=10\n\n def cat_clean(self):\n 
self.men.go_to_clean_shop()\n self.house.money-=10\n\n def cat_go_house(self, men, house):\n print('cat was the man and the house ')\n self.men= men\n self.house=house\n\n\n def cat_act(self):\n self.cat_play()\n self.cat_fullness-=10\n if self.house.cat_food <= 10:\n self.men.shop()\n if self.house.cat_duty>=50:\n self.cat_clean()\n if self.cat_fullness<=10:\n self.cat_food()\n\n\n\nhome = House()\nmarsik = Cat()\n\nvictor = Men()\n\nvictor.go_into_house(house=home)\nvictor.go_to_cat(cat=marsik)\nmarsik.cat_go_house( house=home,men=victor)\n\nfor day in range (1,15):\n cprint(f'============= day - {day}====================', color='green')\n victor.act()\n\n marsik.cat_act()\n print(victor)\n\n print(marsik)\n print(home)\n# Усложненное задание (делать по желанию)\n# Создать несколько (2-3) котов и подселить их в дом к человеку.\n# Им всем вместе так же надо прожить 365 дней.\n\n# (Можно определить критическое количество котов, которое может прокормить человек...)\n","repo_name":"genrobaksel/pythonProject1","sub_path":"lesson_007/03_man_ans_cat.py","file_name":"03_man_ans_cat.py","file_ext":"py","file_size_in_byte":6287,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"8053959169","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\nimport matplotlib.gridspec as gs\nimport numpy as np\nfrom pathlib import Path\nimport pickle\nfrom scipy.interpolate import interp1d\n\nplt.rcParams.update({'font.family': 'cmr10',\n 'font.size': 12,\n 'axes.unicode_minus': False,\n 'axes.labelsize': 12,\n 'axes.labelsize': 12,\n 'figure.figsize': (4, 4),\n 'figure.dpi': 80,\n 'mathtext.fontset': 'cm',\n 'mathtext.rm': 'serif',\n 'xtick.direction': 'in',\n 'ytick.direction': 'in',\n 'xtick.top': True,\n 'ytick.right': True\n })\nproject_dir = Path(__file__).resolve().parents[2]\np_name = project_dir.joinpath('data/modelpredictions/')\ndata_dir = str(p_name.resolve())\ndata_dir = 'data/modelpredictions/'\n\n# Bhattacharyya distance\nplt.figure(figsize=(8.5, 2.5))\ngs0 = gs.GridSpec(1, 3, wspace=0.1,)\nfor prong in [2, 3, 4]:\n\n with open(data_dir + 'Metrics_{0}p.p'.format(prong), 'rb') as fin:\n AllMets = pickle.load(fin)\n\n ax2 = plt.subplot(gs0[prong - 2])\n plt.xlabel('Signal Efficiency')\n print(AllMets['BaseNeuralNetwork'])\n base, = plt.plot(AllMets['BaseNeuralNetwork']['efficiencies'],\n AllMets['BaseNeuralNetwork']['BhatD'],\n color='C0'\n )\n\n plt.plot(AllMets['BaseNeuralNetwork']['efficiencies'],\n AllMets['BaseNeuralNetwork']['JSD'],\n color='C0', ls='--'\n )\n pca, = plt.plot(AllMets['PCANeuralNetwork']['efficiencies'],\n AllMets['PCANeuralNetwork']['BhatD'],\n color='C3'\n )\n plt.plot(AllMets['PCANeuralNetwork']['efficiencies'],\n AllMets['PCANeuralNetwork']['JSD'],\n color='C3',\n ls='--'\n )\n planed, = plt.plot(AllMets['PlanedNeuralNetwork']['efficiencies'],\n AllMets['PlanedNeuralNetwork']['BhatD'],\n color='C2'\n )\n plt.plot(AllMets['PlanedNeuralNetwork']['efficiencies'],\n AllMets['PlanedNeuralNetwork']['JSD'],\n color='C2',\n ls='--'\n )\n uboost, = plt.plot(AllMets['uBoost']['efficiencies'],\n AllMets['uBoost']['BhatD'],\n color='blue',\n )\n plt.plot(AllMets['uBoost']['efficiencies'],\n AllMets['uBoost']['JSD'],\n color='blue',\n ls='--'\n )\n adversary, = plt.plot(AllMets['AdversaryLambda_050']['efficiencies'],\n AllMets['AdversaryLambda_050']['BhatD'],\n color='C4',\n )\n plt.plot(AllMets['AdversaryLambda_050']['efficiencies'],\n 
AllMets['AdversaryLambda_050']['JSD'],\n color='C4',\n ls='--'\n )\n\n if prong == 2:\n plt.ylabel('Histogram Distance')\n # plt.ylabel('Bhat. Dist.')\n plt.legend([Patch(facecolor='C0',\n label='Original'),\n Patch(facecolor='C3',\n label='PCA'),\n Patch(facecolor='C2',\n label='Planed'),\n Patch(facecolor='blue',\n label='uBoost'),\n Patch(facecolor='C4',\n label='Adv')],\n ['Original', 'PCA', 'Planed', 'uBoost',\n 'Adv'],\n fontsize=10,\n frameon=True,\n labelspacing=0.15\n )\n elif prong == 3:\n BD, = plt.plot([], [], color='gray')\n JS, = plt.plot([], [], color='gray', ls='--')\n plt.legend([BD, JS], # , singlevar],\n ['Bhattacharyya', 'Jensen-Shannon'], # , 'Single Variable'],\n fontsize=10,\n frameon=True)\n plt.setp(ax2.get_yticklabels(), visible=False)\n else:\n plt.setp(ax2.get_yticklabels(), visible=False)\n plt.title('{0}-prong'.format(prong))\n plt.ylim(0, 1.0)\n plt.xticks([0, 0.25, 0.5, 0.75, 1.0]\n )\n plt.xlim(-.05, 1.05)\n plt.grid()\n plt.minorticks_on()\n\nplt.savefig('reports/figures/DistanceCompare.pdf',\n bbox_inches='tight')\nplt.clf()\nplt.close()\n","repo_name":"bostdiek/MassAgnostic-JetTaggers","sub_path":"src/visualization/CompareDistanceMeasures.py","file_name":"CompareDistanceMeasures.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"30992113785","text":"import numpy as np\nimport knn\n\n\ndef contingency(cl_orig, cl_test):\n table = {\n \"TP\": 0.,\n \"FP\": 0.,\n \"FN\": 0.,\n \"TN\": 0.\n }\n\n for i in range(len(cl_orig)):\n if cl_orig[i] == 0:\n if cl_test[i] == 0:\n table[\"TN\"] += 1\n else:\n table[\"FP\"] += 1\n else:\n if cl_test[i] == 0:\n table[\"FN\"] += 1\n else:\n table[\"TP\"] += 1\n\n table[\"P\"] = table[\"TP\"] + table[\"FN\"]\n table[\"N\"] = table[\"TN\"] + table[\"FP\"]\n\n if table[\"TP\"] + table[\"FP\"] == 0.:\n table[\"PPV\"] = 0.\n else:\n table[\"PPV\"] = table[\"TP\"] / (table[\"TP\"] + table[\"FP\"])\n\n table[\"ACC\"] = (table[\"TP\"] + table[\"TN\"]) / (table[\"P\"] + table[\"N\"])\n\n if table[\"P\"] == 0.:\n table[\"TRP\"] = 0.\n else:\n table[\"TRP\"] = table[\"TP\"] / table[\"P\"]\n\n if table[\"PPV\"] + table[\"TRP\"] == 0.:\n table[\"F1\"] = 0.\n else:\n table[\"F1\"] = 2 * table[\"PPV\"] * table[\"TRP\"] / (table[\"PPV\"] + table[\"TRP\"])\n\n return table\n\n\ndef __cut_from_array(array, start, end):\n return np.concatenate((array[:start], array[end:])), array[start:end]\n\n\ndef remove_noise(points, classes, metric, kernel, k):\n fds = folds(points, classes, len(points))\n pts, cls = [], []\n\n for fold in fds:\n cl = knn.classify(fold[\"train_p\"], fold[\"train_c\"], fold[\"test_p\"][0], metric, kernel, k)\n if cl == fold[\"test_c\"][0]: # not noise\n pts.append(fold[\"test_p\"][0])\n cls.append(cl)\n\n return np.array(pts), np.array(cls)\n\n\ndef folds(points, classes, folds_num, shuffle=False):\n fds = []\n\n if shuffle:\n shape = points.shape\n data = np.zeros((shape[0], shape[1] + 1))\n data[:, :-1] = points\n data[:, -1] = classes\n\n np.random.shuffle(data)\n points = data[:, :-1]\n classes = data[:, -1]\n\n size = len(points) / folds_num\n for start in range(0, len(points), size):\n train_p, test_p = __cut_from_array(points, start, start + size)\n train_c, test_c = __cut_from_array(classes, start, start + size)\n fds.append({\"train_p\": train_p, \"train_c\": train_c, \"test_p\": test_p, \"test_c\": test_c})\n\n return 
fds\n","repo_name":"eadm/ML","sub_path":"knn/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"7752765388","text":"\"\"\"\nLer uma quantidade de notas determinada de alunos\narmazenar no vetor\ncalcular média\nimprimir média\n\"\"\"\n\nlista = []\nfor i in range(0, 5):\n notas = int(input('Digite as notas: '))\n lista.append(notas)\nsoma = sum(lista)\nmedia = soma / 5\nprint(media)\n","repo_name":"CarloShadow/CodesByCJ","sub_path":"Python/Exercicios/Coleções Python/Part 1/Ex10.py","file_name":"Ex10.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"18856426198","text":"import pickle\nlist1=pickle.load(open(\"/users/PAS0272/osu10258/data/a2.pickle\",\"rb\"))\n#print (list1)\nlist2=pickle.load(open(\"/users/PAS0272/osu10258/data/a3.pickle\",\"rb\"))\n#print (list2[449])\n#print (list2[2588])\nprint (list1)\n\ncount_total=[0,0,0,0,0]\nfor index1,list1s in enumerate(list1):\n\tcount=0\n\tfor list2s in list1s:\n\t\tcount+=list2s\n\tif (count==3):\n\t\tprint(index1)\n\t\tbreak\nprint(list2[14])\nprint(list1[14])\n","repo_name":"akmrak/ISIC_task2","sub_path":"a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"34876446958","text":"import pandas as pd\nimport request\nfrom pymongo import MongoClient\nimport pymongo\n\n#BBDD\n\nclient=pymongo.MongoClient(\"mongodb://localhost/ComprApp\")\n\n#Collections\nmydb = client.get_database()\nuserColl = mydb['usuarios']\nrecetasColl = mydb['recetas'] \n\ndef crea_recetas(df):\n \"\"\"\n This function generates the selected recipes explanation\n \"\"\"\n\n #Getting all recipes\n datas= mydb.recetas.find({} , { \"_id\":0,\"nombre\":1,\"receta\":1}) \n\n #As a df\n df_recetas=pd.DataFrame(list(datas))\n\n #Setting new index\n df_recetas.set_index([\"nombre\"],inplace=True)\n\n #Iterating to get the recipes\n for e in df_recetas.index:\n if e in df.head(5).index:\n print(df_recetas.loc[e])\n\n return df_recetas","repo_name":"luissanchezgrisolia/Ironhack-Final-Project","sub_path":"SRC/genera_recetas.py","file_name":"genera_recetas.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"35352474736","text":"import random\n\nfrom solutions.day_1 import Day1\n\n\ndef test_part_1(day_1_input):\n solver = Day1(day_1_input)\n\n assert solver.part_1() == 514579\n\n\ndef test_part_2(day_1_input):\n solver = Day1(day_1_input)\n\n assert solver.part_2() == 241861950\n\n\ndef generate_test_case(length, max_int):\n numbers = list(range(2021))\n i = 0\n j = len(numbers) - 1\n possible_pairs = []\n while i <= j:\n possible_pairs.append((numbers[i], numbers[j]))\n i += 1\n j -= 1\n\n a, b = possible_pairs[random.randint(0, len(possible_pairs) - 1)]\n test_case = [a, b]\n for _ in range(length - 2):\n n = random.randint(0, max_int)\n while n == a or n == b:\n n = random.randint(0, max_int)\n test_case.append(n)\n\n with open(\"test.txt\", \"w\") as f:\n f.writelines([str(n) + \"\\n\" for n in 
test_case])\n","repo_name":"guillermo-carrasco/advent-of-code-2020","sub_path":"tests/test_day_1.py","file_name":"test_day_1.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"70151912287","text":"#!/usr/bin/env python3\n\"\"\"Returning the location of the ISS in latitude/longitude\"\"\"\nimport requests\nimport datetime\nimport reverse_geocoder as revgeo\n\nURL= \"http://api.open-notify.org/iss-now.json\"\ndef main():\n resp= requests.get(URL).json()\n\n #Api changes each time a GET request is sent \n lon = resp[\"iss_position\"][\"longitude\"]\n lat = resp[\"iss_position\"][\"latitude\"]\n epoch_time = resp[\"timestamp\"]\n \n location_resp = revgeo.search((lat, lon))\n\n city = location_resp[0][\"name\"]\n\n country = location_resp[0][\"cc\"]\n\n date_time = datetime.datetime.fromtimestamp(epoch_time)\n\n print(f\"\"\"CURRENT LOCATION OF THE ISS:\n Timestamp: {date_time}\n Lon: {lon}\n lat: {lat}\n City/Country: {city}, {country}\n \"\"\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hussainkghani/mycode","sub_path":"challenge_issTracker.py","file_name":"challenge_issTracker.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"15342108117","text":"\"\"\"\"\"\"\"\"\"\nPytorch implementation of \"A simple neural network module for relational reasoning\nCode is based on pytorch/examples/mnist (https://github.com/pytorch/examples/tree/master/mnist)\n\"\"\"\"\"\"\"\"\"\nfrom __future__ import print_function\nimport argparse\nimport copy\nimport os\n# import cPickle as pickle\nimport pickle\nimport random\nimport numpy as np\nfrom relation_network import RN, CNN_MLP, Pool\nimport torch\nfrom torch.autograd import Variable\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch Relational-Network sort-of-CLVR Example')\nparser.add_argument('--model', type=str, choices=['RN', 'CNN_MLP', 'Pool'], default='RN',\n help='resume from model stored')\nparser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=50, metavar='N',\n help='number of epochs to train (default: 20)')\nparser.add_argument('--lr', type=float, default=0.0001, metavar='LR',\n help='learning rate (default: 0.0001)')\nparser.add_argument('--pre-relational', action='store_true', default=False,\n help='Adds pre-relational layers')\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\nparser.add_argument('--save-all', action='store_true', default=False,\n help='save each epoch')\nparser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\nparser.add_argument('--resume', type=str, help='resume from model stored')\nparser.add_argument('--data-dir', type=str, default=\"Data/\", help='Data directory')\nparser.add_argument('--saved-model-dir', type=str, default=\"Saved_Models/\", help='Saved model directory')\nparser.add_argument('--name', type=str, default=\"model\", help='Saved model directory')\nparser.add_argument('--gelu', action='store_true', default=False, help='use gelu as act func')\nparser.add_argument('--sigmoid', action='store_true', default=False, 
help='use sigmoid as act func')\nparser.add_argument('--tanh', action='store_true', default=False, help='use tanh as act func')\nparser.add_argument('--num_outputs', type=int, default=10, help='number of outputs')\n\nargs = parser.parse_args()\nargs.cuda = not args.no_cuda and torch.cuda.is_available()\nprint(\"Using CUDA: \", args.cuda)\ndevice = torch.device(\"cuda\" if args.cuda else \"cpu\")\n\n# args.resume = \"epoch_3_way_RN_02.pth\"\n\ntorch.manual_seed(args.seed)\nif args.cuda:\n torch.cuda.manual_seed(args.seed)\n\nif args.model == 'CNN_MLP':\n model = CNN_MLP(args, device).to(device)\nelif args.model == 'RN':\n model = RN(args, device).to(device)\nelif args.model == 'Pool':\n model = Pool(args, device).to(device)\n\nbs = args.batch_size\ninput_img = torch.FloatTensor(bs, 3, 75, 75).to(device)\ninput_img_unscaled = torch.FloatTensor(bs, 3, 300, 300).to(device)\ninput_qst = torch.FloatTensor(bs, 11).to(device)\nlabel = torch.LongTensor(bs).to(device)\n\ninput_img = Variable(input_img)\ninput_img_unscaled = Variable(input_img_unscaled)\ninput_qst = Variable(input_qst)\nlabel = Variable(label)\n\nbest_acc = 0\nbest_epoch = 0\nbest_model_wts = copy.deepcopy(model.state_dict())\nhist = []\n\n\ndef tensor_data(data, i):\n img = torch.from_numpy(np.asarray(data[0][bs * i:bs * (i + 1)]))\n qst = torch.from_numpy(np.asarray(data[1][bs * i:bs * (i + 1)]))\n ans = torch.from_numpy(np.asarray(data[2][bs * i:bs * (i + 1)]))\n\n input_img_unscaled.data.resize_(img.size()).copy_(img)\n input_img.copy_(torch.nn.functional.interpolate(input_img_unscaled, 75))\n input_qst.data.resize_(qst.size()).copy_(qst)\n label.data.resize_(ans.size()).copy_(ans)\n\n\ndef cvt_data_axis(data):\n img = [e[0] for e in data]\n qst = [e[1] for e in data]\n ans = [e[2] for e in data]\n return (img, qst, ans)\n\n\ndef train(epoch, rel, norel):\n model.to(device).train()\n\n if not len(rel[0]) == len(norel[0]):\n print('Not equal length for relation dataset and non-relation dataset.')\n return\n\n random.shuffle(rel)\n random.shuffle(norel)\n\n rel = cvt_data_axis(rel)\n norel = cvt_data_axis(norel)\n\n for batch_idx in range(len(rel[0]) // bs):\n tensor_data(rel, batch_idx)\n accuracy_rel = model.train_(input_img.to(device), input_qst.to(device), label.to(device))\n\n tensor_data(norel, batch_idx)\n accuracy_norel = model.train_(input_img.to(device), input_qst.to(device), label.to(device))\n\n if batch_idx % args.log_interval == 0:\n print(\n 'Train Epoch: {} [{}/{} ({:.0f}%)] Relations accuracy: {:.0f}% | Non-relations accuracy: {:.0f}%'.format(\n epoch, batch_idx * bs * 2, len(rel[0]) * 2, \\\n 100. 
* batch_idx * bs / len(rel[0]), accuracy_rel, accuracy_norel))\n\n\ndef test(epoch, rel, norel, best_acc, best_epoch, best_model_wts):\n model.to(device).eval()\n if not len(rel[0]) == len(norel[0]):\n print('Not equal length for relation dataset and non-relation dataset.')\n return\n\n rel = cvt_data_axis(rel)\n norel = cvt_data_axis(norel)\n\n accuracy_rels = []\n accuracy_norels = []\n for batch_idx in range(len(rel[0]) // bs):\n tensor_data(rel, batch_idx)\n accuracy_rels.append(model.test_(input_img.to(device), input_qst.to(device), label.to(device)))\n\n tensor_data(norel, batch_idx)\n accuracy_norels.append(model.to(device).test_(input_img.to(device), input_qst.to(device), label.to(device)))\n\n accuracy_rel = sum(accuracy_rels) / len(accuracy_rels)\n accuracy_norel = sum(accuracy_norels) / len(accuracy_norels)\n if epoch > -1:\n hist.append((epoch, accuracy_rel, accuracy_norel))\n print('\\n Test set: Relation accuracy: {:.0f}% | Non-relation accuracy: {:.0f}%\\n'.format(\n accuracy_rel, accuracy_norel))\n\n if accuracy_norel + accuracy_rel > best_acc:\n best_acc = accuracy_norel + accuracy_rel\n best_epoch = epoch\n best_model_wts = copy.deepcopy(model.state_dict())\n return best_acc, best_epoch, best_model_wts\n\n\ndef load_data():\n print('loading data...')\n dirs = args.data_dir\n filename = os.path.join(dirs, 'sort-of-clevr.pickle')\n with open(filename, 'rb') as f:\n train_datasets, test_datasets = pickle.load(f)\n rel_train = []\n rel_test = []\n norel_train = []\n norel_test = []\n print('processing data...')\n\n for img, relations, norelations in train_datasets:\n img = np.swapaxes(img, 0, 2)\n for qst, ans in zip(relations[0], relations[1]):\n rel_train.append((img, qst, ans))\n for qst, ans in zip(norelations[0], norelations[1]):\n norel_train.append((img, qst, ans))\n\n for img, relations, norelations in test_datasets:\n img = np.swapaxes(img, 0, 2)\n for qst, ans in zip(relations[0], relations[1]):\n rel_test.append((img, qst, ans))\n for qst, ans in zip(norelations[0], norelations[1]):\n norel_test.append((img, qst, ans))\n\n return (rel_train, rel_test, norel_train, norel_test)\n\n\nrel_train, rel_test, norel_train, norel_test = load_data()\n\ntry:\n os.makedirs(args.saved_model_dir)\nexcept:\n print('directory {} already exists'.format(args.saved_model_dir))\n\nif args.resume:\n filename = os.path.join(args.saved_model_dir, args.resume)\n if os.path.isfile(filename):\n print('==> loading checkpoint {}'.format(filename))\n checkpoint = torch.load(filename, map_location=None if args.cuda else torch.device('cpu'))\n model.load_state_dict(checkpoint)\n print('==> loaded checkpoint {}'.format(filename))\n\nfor epoch in range(1, args.epochs + 1):\n train(epoch, rel_train, norel_train)\n best_acc, best_epoch, best_model_wts = test(epoch, rel_test, norel_test, best_acc, best_epoch, best_model_wts)\n if args.save_all:\n model.save_model(epoch, args.name)\nmodel.load_state_dict(best_model_wts)\nmodel.save_model(0, args.name)\nprint(\"best\")\ntest(-1, rel_test, norel_test, best_acc, best_epoch, best_model_wts)\ntry:\n os.makedirs(\"./Results/ExplainRN/{}/\".format(args.name))\nexcept:\n pass\nfile_object = open('Results/ExplainRN/{}/RN_hist_{}.txt'.format(args.name, args.name), 'w')\nfile_object.write(str(hist))\nfile_object.close()\n# left: rel, small pre, 
pre\n","repo_name":"slerman12/ExplainingInteractions","sub_path":"TaylorCAM/RelationalReasoning.py","file_name":"RelationalReasoning.py","file_ext":"py","file_size_in_byte":8519,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"88"} +{"seq_id":"15946287353","text":"def retorna_soma(p1, f1):\r\n if len(p1) == 0 or len(f1) == 0:\r\n return 'A pilha e a fila precisam ter no minimo um valor'\r\n ### Pegando maior par da fila ###\r\n flag = 0\r\n max_fila = -1\r\n while len(f1) != 0:\r\n valor = f1.remove()\r\n if (valor % 2 == 0) and flag == 0:\r\n max_fila = valor\r\n flag = 1\r\n if valor % 2 == 0:\r\n par = valor\r\n if par > max_fila:\r\n max_fila = par\r\n ### Pegando menor impar da pilha ###\r\n flag = 0\r\n min_pilha = -1\r\n while len(p1) != 0:\r\n valor = p1.remove()\r\n if (valor % 2 != 0) and flag == 0:\r\n min_pilha = valor\r\n flag = 1\r\n if valor % 2 != 0:\r\n impar = valor\r\n if impar < min_pilha:\r\n min_pilha = impar\r\n ### Retornando a soma considerando a fila e pilha ###\r\n if min_pilha != -1 and max_fila != -1:\r\n return max_fila + min_pilha \r\n elif max_fila == -1:\r\n return min_pilha\r\n else: return max_fila","repo_name":"merino626/Projetos","sub_path":"Estrutura_de_dados/Files/Ac3/so_a_func2.py","file_name":"so_a_func2.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"88"} +{"seq_id":"17981463488","text":"'''\nPROBLEM STATEMENT\n-----------------\nGiven a binary array A of size N and an integer M. Find the maximum number of consecutive 1's produced by flipping at most M 0's.\n\nInput:\nThe first line contains an integer T denoting the total number of test cases. In each test cases, the first line contains an integer N denoting the size of array. The second line contains N space-separated integers A1, A2, ..., AN denoting the elements of the array. Third line consists of an integer m that is maximum number of flips allowed.\n\nOutput:\nOutput the maximum numbers of consecutive 1's.\n\nConstraints:\n1 <= T <= 103\n1 <= N <= 107\n0 <= M <= N\n0 <= Ai <= 1\n\nExample:\nInput:\n1\n11\n1 0 0 1 1 0 1 0 1 1 1\n2\n\nOutput:\n8\n\nExplanation:\nTestcase 1: Maximum subarray is of size 8 which can be made subarray of all 1 after flipping two zeros to 1.\n\nLOGIC\n-----\nFor all positions of 0’s calculate left[] and right[] which defines the number of consecutive 1’s to the left of i and right of i respectively.\n\nFor example, for arr[] = {1, 1, 0, 1, 1, 0, 0, 1, 1, 1} and m = 1, left[2] = 2 and right[2] = 2, left[5] = 2 and right[5] = 0, left[6] = 0 and right[6] = 3.\n\nleft[] and right[] can be filled in O(n) time by traversing array once and keeping track of last seen 1 and last seen 0. While filling left[] and right[], we also store indexes of all zeroes in a third array say zeroes[]. For above example, this third array stores {2, 5, 6}\n\nNow traverse zeroes[] and for all consecutive m entries in this array, compute the sum of 1s that can be produced. This step can be done in O(n) using left[] and right[].\n\nAn Efficient Solution can solve the problem in O(n) time and O(1) space. The idea is to use Sliding Window for the given array. The solution is taken from here.\nLet us use a window covering from index wL to index wR. Let the number of zeros inside the window be zeroCount. 
We maintain the window with at most m zeros inside.\n\nThe main steps are:\n– While zeroCount is no more than m: expand the window to the right (wR++) and update the count zeroCount.\n– While zeroCount exceeds m, shrink the window from left (wL++), update zeroCount;\n– Update the widest window along the way. The positions of output zeros are inside the best window.\n\nSOURCE\n------\ngeeksforgeeks\n\nCODE\n----\n'''\ndef maximize_ones(l, n, m):\n    left = right = 0\n    window = 0\n    zero_count = 0\n    \n    while right < n:\n        \n        if zero_count <= m:\n            if l[right] == 0:\n                zero_count += 1\n            right += 1\n        \n        if zero_count > m:\n            if l[left] == 0:\n                zero_count -= 1\n            left += 1\n        \n        if (right - left > window) and zero_count <= m:\n            window = right - left\n    \n    return window\n\nt = int(input())\nfor _ in range(t):\n    n = int(input())\n    l = [int(x) for x in input().split()]\n    m = int(input())\n    print(maximize_ones(l, n, m))\n","repo_name":"swatia-code/data_structure_and_algorithm","sub_path":"array/maximize_number_of_ones.py","file_name":"maximize_number_of_ones.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
{"seq_id":"35269689989","text":"# Faça um Programa que leia três números e mostre o maior deles.\n\n# primeiro_numero = int(input(\"Digite o primeiro numero: \"))\n# segundo_numero = int(input(\"Digite o segundo numero: \"))\n# terceiro_numero = int(input(\"Digite o terceiro numero: \"))\n# numeros = [primeiro_numero, segundo_numero, terceiro_numero]\n\n# print(str(numeros))\n\nprimeiro_numero = int(input(\"Digite o primeiro numero: \"))\nsegundo_numero = int(input(\"Digite o segundo numero: \"))\nterceiro_numero = int(input(\"Digite o terceiro numero: \"))\nnumeros = [primeiro_numero, segundo_numero, terceiro_numero]\nlist.sort(numeros)\nmaior = numeros[2]\nprint(f'O maior número: {maior}.')\n\nif(primeiro_numero > segundo_numero and primeiro_numero > terceiro_numero):\n    print(\"O primeiro numero escolhido foi o maior: \" +str(primeiro_numero))\nelse:\n    if(segundo_numero > terceiro_numero):\n        print(\"O segundo numero escolhido foi o maior: \" +str(segundo_numero))\n    else:\n        print(\"O terceiro numero escolhido foi o maior: \" +str(terceiro_numero))\n\n# maior_numero = None\n\n# for num in numeros:\n#     if (maior_numero is None or num > maior_numero):\n#         maior_numero = num\n\n# print('O maior número foi:', maior_numero)","repo_name":"blackstarxd/Dev-com-o-Dormind0","sub_path":"Python Brasil - Estrutura de Decisão/exercicio 6.py","file_name":"exercicio 6.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"}
{"seq_id":"12374373617","text":"import requests\nfrom parsel import Selector\nimport re\n\nfrom utils.InvestExceptions import *\n\nclass InvestParser():\n    \n    def __init__(self, id, company):\n        \n        company = company.split('?')\n        \n        company_path = company[0]\n        company_query = ''\n        \n        if len(company) > 1:\n            company_query = company[1]\n        \n        urlSummary = f'https://www.investing.com/instruments/Financials/changesummaryreporttypeajax?action=change_report_type&pid={id}&financial_id={id}&ratios_id={id}&period_type=Annual'\n        urlRatios = f'https://www.investing.com/{company_path}-ratios{\"?\" + company_query if company_query != \"\" else \"\"}'\n        urlTechnical = f'https://www.investing.com/{company_path}/technical/technical-summary{\"?\" + company_query if company_query != \"\" else \"\"}'\n        urlIncome = 
f'https://www.investing.com/instruments/Financials/changereporttypeajax?action=change_report_type&pair_ID={id}&report_type=INC&period_type=Annual'\n \n summary_res = requests.get(urlSummary)\n ratios_res = requests.get(urlRatios)\n technical_res = requests.get(urlTechnical)\n income_res = requests.get(urlIncome)\n \n if summary_res.status_code != 200:\n raise NotFoundCompany()\n \n if ratios_res.status_code != 200:\n raise NotFoundCompany()\n \n if technical_res.status_code != 200:\n raise NotFoundCompany()\n \n if income_res.status_code != 200:\n raise NotFoundCompany()\n \n self.summary = Selector(text=summary_res.text)\n self.ratios = Selector(text=ratios_res.text)\n self.technical = Selector(text=technical_res.text)\n self.income = Selector(text=income_res.text)\n \n def parse(self):\n \n result = {}\n errors = []\n \n result[\"country\"] = self.parse_country()\n result[\"industry\"] = self.parse_info('Industry')\n result['sector'] = self.parse_info('Sector')\n result[\"title\"] = self.parse_title()\n result[\"ebitda\"] = self.parse_ebitda()\n result[\"net_profit_margin\"] = self.parse_net_profit_margin()\n result[\"debt_to_equity\"] = self.parse_debt_to_equity()\n result[\"eps\"] = self.parse_eps()\n result[\"p_e\"] = self.parse_p_e()\n result[\"p_s\"] = self.parse_from_ratios('Price to Sales ')\n result[\"roe\"] = self.parse_roe()\n result[\"roa\"] = self.parse_roa()\n result['tech_analysis'] = self.parse_tech_analysis()\n \n for param in result.keys():\n \n if result[param] == None:\n \n errors.append(param)\n \n if len(errors) > 0:\n \n raise NotFoundParams(errors)\n \n float_list = ['ebitda', 'net_profit_margin', 'debt_to_equity', 'eps', 'p_e', 'p_s', 'roe', 'roa']\n\n for name in float_list:\n\n result[name] = float(result[name])\n \n return result\n \n def parse_title(self):\n \n title = self.ratios.xpath('//h1/text()').get()\n \n if title == None:\n return None\n \n return re.sub(r'\\) ', ')', title)\n \n def parse_ebitda(self):\n \n ebitda = self.summary.xpath(\"/html/body/div[1]/table/tbody/tr[1]/td[2]/text()\").get()\n \n if ebitda == None:\n ebitda = self.summary.xpath(\"/html/body/div[1]/table/tbody/tr[1]/td[3]/text()\").get()\n \n return ebitda\n \n def parse_net_income(self):\n \n income = self.summary.xpath(\"/html/body/div[1]/table/tbody/tr[4]/td[2]/text()\").get()\n \n if income == None:\n income = self.summary.xpath(\"/html/body/div[1]/table/tbody/tr[4]/td[3]/text()\").get()\n \n return income\n \n def parse_equity(self):\n equity = self.summary.xpath('//td[text()=\"Total Equity\"]/following-sibling::td[1]/text()').get()\n \n if equity == None:\n equity = self.summary.xpath('//td[text()=\"Total Equity\"]/following-sibling::td[2]/text()').get()\n \n return equity\n \n def parse_debt_to_equity(self):\n \n equity = self.parse_equity()\n debt = self.summary.xpath('//td[text()=\"Total Liabilities\"]/following-sibling::td[1]/text()').get()\n \n if equity == None or debt == None:\n return None\n \n value = round((float(debt) / float(equity)), 2)\n \n return value\n \n def parse_net_profit_margin(self):\n \n npm = self.parse_from_ratios('Net Profit margin ')\n \n if npm == None:\n income = self.parse_net_income()\n ebitda = self.parse_ebitda()\n \n if income == None or ebitda == None:\n return None\n \n npm = round((float(income) / float(ebitda)), 2)\n \n else:\n npm = float(re.sub(r'%', '', npm)) / 100\n \n return npm\n \n def parse_assets(self):\n assets = self.summary.xpath(\"/html/body/div[3]/table/tbody/tr[1]/td[2]/text()\").get()\n\n if assets == None:\n assets = 
self.summary.xpath(\"/html/body/div[3]/table/tbody/tr[1]/td[3]/text()\").get()\n \n return assets\n \n def parse_roa(self):\n \n roa = self.parse_from_ratios('Return on Assets ')\n \n if roa == None or roa == '0%':\n income = self.parse_net_income()\n assets = self.parse_assets()\n \n if income == None or assets == None:\n return None\n \n roa = round((float(income) / float(assets)), 2)\n \n else:\n roa = float(re.sub(r'%', '', roa)) / 100\n \n return roa\n \n def parse_roe(self):\n \n roe = self.parse_from_ratios('Return on Equity ')\n \n if roe == None or roe == '0%':\n income = self.parse_net_income()\n equity = self.parse_equity()\n \n if income == None or equity == None:\n return None\n \n roe = round((float(income) / float(equity)), 2)\n else:\n roe = float(re.sub(r'%', '', roe)) / 100\n \n return roe\n \n def parse_p_e(self):\n \n p_e = self.parse_from_ratios('P/E Ratio ')\n \n if p_e == None:\n p_e = self.parse_from_technical('P/E Ratio')\n \n return p_e\n \n def parse_eps(self):\n \n eps = self.parse_from_technical('EPS')\n \n if eps == None:\n eps = self.parse_from_ratios('Basic EPS ')\n \n if eps == None:\n eps = self.parse_from_ratios('Diluted EPS ')\n \n if eps == None:\n eps = self.income.xpath('//span[text()=\"Diluted Normalized EPS\"]/../following-sibling::td[1]/text()').get()\n \n if eps == None:\n eps = self.income.xpath('//span[text()=\"Diluted Normalized EPS\"]/../following-sibling::td[2]/text()').get()\n \n return eps\n \n def parse_from_ratios(self, text):\n \n ratio = self.ratios.xpath(f'//table[@id=\"rrTable\"]//span[text()=\"{text}\"]/../following-sibling::td[1]/text()').get()\n \n if ratio == '-':\n return None\n \n return ratio\n \n def parse_tech_analysis(self):\n \n return self.technical.xpath('//table/tbody/tr[3]/td[6]/text()').get()\n \n def parse_from_technical(self, text):\n \n return self.technical.xpath(f'//dt[text()=\"{text}\"]/following-sibling::dd/span/span[2]/text()').get()\n \n def parse_country(self):\n \n country = self.parse_info('Market')\n \n if country == None:\n country = self.ratios.xpath(f'//span[text()=\"Market:\"]/following-sibling::span/@title').get()\n \n return country\n \n def parse_info(self, text):\n \n info = self.technical.xpath(f'//div[text()=\"{text}\"]/following-sibling::a[1]/text()').get()\n \n if info == None:\n info = self.technical.xpath(f'//div[text()=\"{text}\"]/a/text()').get()\n \n return info","repo_name":"Scull56/InvestParser","sub_path":"src/parsers/InvestParser.py","file_name":"InvestParser.py","file_ext":"py","file_size_in_byte":7701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"24909480405","text":"import os\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom configuration import CONFIGURATION\n\ndata = {\n \"Cr-Cr\": {\n \"energy\": [],\n \"distance\": [],\n },\n \"Cr-Fe\": {\n \"energy\": [],\n \"distance\": [],\n },\n \"Fe-Cr\": {\n \"energy\": [],\n \"distance\": [],\n },\n \"Fe-Fe\": {\n \"energy\": [],\n \"distance\": [],\n },\n}\n\nif __name__ == \"__main__\":\n dirs = os.listdir(\"./out\")\n dirs = [d for d in dirs if os.path.isdir(os.path.join(\"./out\", d))]\n for d in dirs:\n specie_i, specie_j, number = d.split(\"-\")\n key = \"{}-{}\".format(specie_i, specie_j)\n data_path = os.path.join(\"./out\", d, \"log.lammps\")\n with open(data_path, \"r\") as f:\n lines = f.readlines()\n lines = [line.strip() for line in lines]\n parts = lines[-1].split()\n parts = [p.strip() for p in parts]\n distance = []\n energy = []\n for i, p in 
enumerate(parts[-2*CONFIGURATION[\"n_images\"]:]):\n if i % 2 == 0:\n distance.append(float(p))\n else:\n energy.append(float(p))\n energy = [e - energy[0] for e in energy]\n data[key][\"distance\"].append(distance)\n data[key][\"energy\"].append(energy)\n for key in data:\n fig, ax = plt.subplots()\n for distance, energy in zip(data[key][\"distance\"], data[key][\"energy\"]):\n #spline = CubicSpline(distance, energy)\n #xs = np.arange(0, 1, 0.01)\n #ax.plot(xs, spline(xs), color=\"blue\", alpha=0.5)\n ax.plot(distance, energy, color=\"blue\", alpha=0.2)\n ax.set_title(key)\n ax.set_ylim((-0.5, 1.6))\n ax.set_ylabel(\"Barrier Height (eV)\")\n ax.set_xlabel(\"Normalized Path Length\")\n filename = \"{}_barrier_height.png\".format(key)\n plt.savefig(filename)\n","repo_name":"seatonullberg/lammps-projects","sub_path":"calculations/vacancy-migration-energy/Fe75Cr25/analyze_barrier_height.py","file_name":"analyze_barrier_height.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"34210626228","text":"\"\"\"\nBlueprint for playing Skat and Doppelkopf online.\n\n\n\"\"\"\n\nimport random\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Dict\n\nfrom flask import Blueprint, render_template, request, redirect, current_app\n\ndoko_skat = Blueprint(\n \"doko_skat\",\n __name__,\n template_folder=\"templates\",\n static_folder=\"static\",\n static_url_path=\"/games/doko_skat/static\",\n)\n\n\ndef shuffle_cards(seed: str, nr: int, number_of_cards: int) -> List[int]:\n \"\"\"Return a shuffled list of cards.\n\n The cards are just integer number between 0, 1,..., number_of_cards-1.\n\n The seed is set so that during a game the same set of randomized\n cards are used.\n\n \"\"\"\n random.seed(seed + str(nr) + current_app.config[\"SECRET_SEED\"])\n cards = list(range(number_of_cards))\n random.shuffle(cards)\n return cards\n\n\ndef get_doko_cards(seed: str, nr: int, player: str) -> List[str]:\n \"\"\"For a given seed and player, return the players list of cards as png files.\"\"\"\n cards = shuffle_cards(seed, nr, 48)\n # pick the card for the player\n if player == \"A\":\n cards = cards[:12]\n elif player == \"B\":\n cards = cards[12:24]\n elif player == \"C\":\n cards = cards[24:36]\n elif player == \"D\":\n cards = cards[36:]\n # normalize to just the odd numbers, since we don't have duplicate png files\n cards = [2 * (c // 2) + 1 for c in cards]\n # the png files are sorted in the corrrect way, e.g. 0 is the ten of hearts, etc.\n cards = sorted(cards)\n cards = [\"doko/{}.png\".format(c) for c in cards]\n return cards\n\n\ndef get_skat_cards(seed: str, nr: int, player: str) -> List[str]:\n \"\"\"For a given seed and player, return the players list of cards as png files.\"\"\"\n cards = shuffle_cards(seed, nr, 32)\n if player == \"A\":\n cards = cards[:10]\n elif player == \"B\":\n cards = cards[10:20]\n elif player == \"C\":\n cards = cards[20:30]\n elif player == \"skat\":\n cards = cards[30:]\n # the png files are sorted in the corrrect way, e.g. 
0 is the ten of hearts, etc.\n cards = sorted(cards)\n cards = [\"skat/{}.png\".format(c) for c in cards]\n return cards\n\n\ndef doko_sort_cards_by_suit(cards: List[str]) -> Tuple[List[str]]:\n clubs = [3, 11, 27, 29, 31, 33]\n spades = [5, 13, 35, 37, 39, 41]\n hearts = [1, 7, 15, 43, 45, 47]\n diamonds = [9, 17, 19, 21, 23, 25]\n\n clubs = [\"doko/{}.png\".format(n) for n in clubs]\n spades = [\"doko/{}.png\".format(n) for n in spades]\n hearts = [\"doko/{}.png\".format(n) for n in hearts]\n diamonds = [\"doko/{}.png\".format(n) for n in diamonds]\n\n my_clubs = [c for c in cards if c in clubs]\n my_spades = [c for c in cards if c in spades]\n my_hearts = [c for c in cards if c in hearts]\n my_diamonds = [c for c in cards if c in diamonds]\n\n return my_clubs, my_spades, my_hearts, my_diamonds\n\ndef select_game_type(game_type:str) -> Tuple[Path, Dict]:\n \"\"\"Return game specific settings.\"\"\"\n if game_type == \"doko\":\n db = Path(\"tmp\") / \"doko.db\"\n game = {\"title\": \"Doppelkopf\", \"link\": \"doko\"}\n else:\n db = Path(\"tmp\") / \"skat.db\"\n game = {\"title\": \"Skat\", \"link\": \"skat\"}\n return db, game\n\ndef tag_exists(tag:str, db: Path)->bool:\n \"\"\"Check if tag is in databse.\n\n Assumes a file based storage/db.\n \"\"\"\n if db.exists():\n with db.open(\"r\") as f:\n for l in f:\n if l.startswith(tag):\n return True\n return False\n\ndef add_tag(tag:str, db:Path)->None:\n \"\"\"Add a tag to the database.\"\"\"\n if db.exists():\n with db.open(\"a\") as f:\n f.write(\"{}\\n\".format(tag))\n\n\n@doko_skat.route(\"/\")\ndef doko(game_type=\"doko\"):\n \"\"\"Page to start a new game.\"\"\"\n\n _, game = select_game_type(game_type)\n\n return render_template(\"doko.html\", game=game)\n\n\n@doko_skat.route(\"////\")\ndef display_game(\n game_type=\"doko\",\n seed: str = None,\n nr: int = 1,\n):\n \"\"\"Game overview page.\n\n Show a page for the current game to see how already looked\n at their hand and who hasn't\n \"\"\"\n nr = int(nr)\n\n STORAGE, game = select_game_type(game_type)\n\n if game_type == \"doko\":\n players = {\"A\": False, \"B\": False, \"C\": False, \"D\": False}\n else:\n players = {\"A\": False, \"B\": False, \"C\": False, \"skat\": False}\n for player in players:\n tag = f\"{seed} {player} {nr}\"\n if tag_exists(tag, STORAGE):\n players[player] = True\n return render_template(\n \"doko-start.html\", seed=seed, nr=nr, players=players, game=game\n )\n\n@doko_skat.route(\"/\", methods=[\"POST\"])\ndef start_game(\n game_type=\"doko\",\n):\n \"\"\"Someone entered a new seesion name.\n\n Do some error checking on the seed and redirect to the first game\n \"\"\"\n seed = request.form[\"name\"].lower()\n seed = seed.replace(\" \", \"\")\n out = \"\"\n for s in seed:\n if s.isalnum():\n out += s\n seed = out\n return redirect(f\"/{game_type}/{seed}/1\")\n\n@doko_skat.route(\"////\")\ndef display_cards(\n game_type=\"doko\",\n seed: str = None,\n player: str = None,\n nr: int = 1,\n):\n \"\"\"Handle request from player to see cards.\n\n We write a tag into our database (just a text file) to see\n if the someone already requested the web page, if so we show\n an error, otherwise, we render the cards\n \"\"\"\n nr = int(nr)\n\n STORAGE, game = select_game_type(game_type)\n\n tag = f\"{seed} {player} {nr}\"\n if tag_exists(tag, STORAGE):\n return render_template(\"doko-single-error.html\", game=game)\n\n if game_type == \"doko\":\n cards = get_doko_cards(seed, nr, player)\n else:\n cards = get_skat_cards(seed, nr, player)\n\n suit = 
doko_sort_cards_by_suit(cards)\n # register page as visited\n add_tag(tag, STORAGE)\n\n return render_template(\n \"doko-game.html\", cards=cards, suit=suit, nr=nr, seed=seed, player=player, game=game\n )\n\n","repo_name":"arunpersaud/webgames","sub_path":"games/doko_skat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"32294455695","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAuthor:Wen\nTime:2019年03月18日 22:22 \ntip-->兴趣是最好的老师<--\n\"\"\"\n# 输入一行字符,分别统计出其中英文字母、空格、数字和其它字符的个数。\n# print 默认输出是换行的,如果要实现不换行需要在变量末尾加上逗号 , !!!!!!\n\n# rawInput = input('输入任意一串字符:')\nrawInput = 'abc 1238A15.388DDsss+- //~'\n\nls = {\"num\":0,\"str\":0,\"blank\":0,\"other\":0}\n# 思路一:通过正则筛选,然后分别统计\n# 不能使用正则,因为不是匹配规则\n\n\n# 思路二:直接统计 通过asicc判断 通过字符串的单独方式判断数字、字母等\n# 通过asicc\n# print( 'c', ord('c'))\n\n# 通过字符串的特殊方法\n'''\ndef staticDatetype(lst):\n for c in lst:\n if c.isalpha():\n # ls.get('str') += 1 这里就会报错,因为字典的赋值不是这么操作的\n ls['str'] = ls.get('str')+1\n elif c.isdigit():\n ls['num'] = ls.get('num')+1\n elif c.isspace():\n ls['blank'] = ls.get('blank')+1\n else:\n ls['other'] = ls.get('other')+1\n\nstaticDatetype(rawInput)\nprint(ls)\n'''\n","repo_name":"uwenhao2008/python100-","sub_path":"python100题目/test17.py","file_name":"test17.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"12256570380","text":"\nimport asyncio\nfrom concurrent.futures import ThreadPoolExecutor\nimport random\nimport requests\nimport time\n\ndef unit_task(data):\n \"\"\"\n For example, this function can be the requests.get for http response.\n \"\"\"\n t = random.random() * 5\n time.sleep(t)\n print(f\"slept {t} seconds\")\n return data * t\n\nasync def batch_task(data, thread_pool):\n \"\"\"\n Since the unit_task is an ordinary function, executing it can occupy\n the event loop thread, block other asyncio tasks, and hence concurrency \n goes down to 1.\n In order to have some concurrency, use a thread pool to run the unit_task\n function.\n \"\"\"\n loop = asyncio.get_event_loop()\n futures = [\n loop.run_in_executor(thread_pool, unit_task, unit_data)\n for unit_data in data\n ]\n results = await asyncio.gather(*futures)\n return results\n\nif __name__ == \"__main__\":\n # concurrency is roughly the pool size\n thread_pool = ThreadPoolExecutor(10)\n results = []\n batch = []\n for x in range(100):\n batch.append(x)\n if len(batch) > 20:\n results.extend(asyncio.run(batch_task(batch, thread_pool)))\n batch.clear()\n print(results) \n","repo_name":"ecolss/adhoc","sub_path":"asyncio_examples.py","file_name":"asyncio_examples.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"36215187184","text":"import socket, ssl\n\ndef client_program():\n host = socket.gethostname() # as both code is running on same pc\n port = 5000 # socket server port number\n\n client_socket = socket.socket() # instantiate\n client_socket.connect((host, port)) # connect to the server\n\n\n # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # client_socket = ssl.wrap_socket(s,\n # ca_certs=\"cert.pem\",\n # cert_reqs=ssl.CERT_REQUIRED,\n # ssl_version=ssl.PROTOCOL_TLSv1)\n # client_socket.connect((host, port))\n\n\n message = input(\" -> \") # take input\n\n while message.lower().strip() != 'exit':\n 
client_socket.send(message.encode()) # send message\n data = client_socket.recv(1024).decode() # receive response\n\n print('Received from server: ' + data) # show in terminal\n\n message = input(\" -> \") # again take input\n\n client_socket.close() # close the connection\n\nif __name__ == '__main__':\n client_program()\n","repo_name":"noah22567/summitwoks","sub_path":"soapapi/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"27213702130","text":"import pprint\nimport functools\n\nseats = {}\n\nwith open(\"input/11.input\") as f:\n for y, row in enumerate(f.readlines()):\n for x, char in enumerate(row.strip()):\n print(y, x, char)\n seats[(y, x)] = char\n\n\ndef update(new, coord, old):\n occupied = 0\n for y in range(coord[0] - 1, coord[0] + 2):\n for x in range(coord[1] - 1, coord[1] + 2):\n if old.get((y, x), \".\") == \"#\":\n occupied += 1\n\n if old[coord] == \"#\" and occupied >= 5:\n new[coord] = \"L\"\n elif old[coord] == \"L\" and occupied == 0:\n new[coord] = \"#\"\n else:\n new[coord] = old[coord]\n\n return new\n\n\nwhile True:\n old_seats = seats\n seats = functools.reduce(lambda acc, cur: update(acc, cur, seats), seats, {})\n\n pprint.pprint(seats)\n print(\"*******\")\n\n if old_seats == seats:\n print(len([coord for coord in seats if seats[coord] == \"#\"]))\n break\n","repo_name":"asmundg/adventofcode","sub_path":"2020/11-a.py","file_name":"11-a.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"70231105568","text":"import frappe\nfrom frappe.model.document import Document\nfrom datetime import datetime, timedelta\n\nclass InterestAmountCalculation(Document):\n\t@frappe.whitelist() \n\tdef get_BalanceDetails(self):\n\t\ttodate=self.to_date\n\t\tfromdate=self.from_date\n\t\tdate_format = \"%Y-%m-%d\"\n\t\tstart_date = datetime.strptime(str(fromdate), date_format)\n\t\tend_date = datetime.strptime(str(todate), date_format)\n\t\tif start_date <= end_date:\n\t\t\tself.append(\"interest_rate_details\",\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\"from_date\": self.from_date,\n\t\t\t\t\t\t\t\t\t\t\"to_date\":self.to_date,\n\t\t\t\t\t\t\t\t\t},)\n\t\twhile start_date <= end_date:\n\t\t\tself.append(\"interest_calculation_details\",\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\"date\": start_date.strftime(date_format),\n\t\t\t\t\t\t\t\t},)\n\t\t\tstart_date += timedelta(days=1)\n\t\t\n\n\t@frappe.whitelist() \n\tdef get_Details(self):\n\t\tlst=[]\n\t\tbalance=0\n\t\tself.total_interest=0\n\t\tfor i in self.get('interest_calculation_details'):\n\t\t\tfor j in self.get('interest_rate_details'):\n\t\t\t\tif i.date>=j.from_date and i.date<=j.to_date:\n\t\t\t\t\ti.interest_rate=j.interest_rate\n\t\t\t\t\t# ac=frappe.db.get_list(\"Account\",fields=['name','root_type'],filters={'name':self.select_account})\n\t\t\t\t\t# for l in ac:\n\t\t\t\t\tcreditpe=frappe.db.get_list(\"Payment Entry\",fields=[\"name\",\"paid_amount\",\"paid_from\",\"paid_to\",\"paid_from_account_balance\",\"paid_to_account_balance\",\"posting_date\"],filters={'paid_from':self.select_account,'posting_date':i.date},limit=1)\n\t\t\t\t\tif creditpe:\n\t\t\t\t\t\tfor k in creditpe:\n\t\t\t\t\t\t\ttdate=k.posting_date\n\t\t\t\t\t\t\ti.credit=k.paid_amount\n\t\t\t\t\t\t\ti.balance=k.paid_from_account_balance-k.paid_amount #if l.root_type=='Asset' else 
k.paid_from_account_balance+k.paid_amount\n\t\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tdebitpe=frappe.db.get_list(\"Payment Entry\",fields=[\"name\",\"paid_amount\",\"paid_from\",\"paid_to\",\"paid_from_account_balance\",\"paid_to_account_balance\",\"posting_date\"],filters={'paid_to':self.select_account,'posting_date':i.date},limit=1)\n\t\t\t\t\tif debitpe:\n\t\t\t\t\t\tfor k in debitpe:\n\t\t\t\t\t\t\ttdate=k.posting_date\n\t\t\t\t\t\t\ti.debit=k.paid_amount\n\t\t\t\t\t\t\ti.balance=k.paid_to_account_balance+k.paid_amount #if l.root_type=='Asset' else k.paid_to_account_balance-k.paid_amount\n\t\t\t\t\t\n\t\t\t\t\tlst.append(i.balance)\n\t\t\t\t\tif i.credit==0 and i.debit==0:\n\t\t\t\t\t\tif len(lst)<2:\n\t\t\t\t\t\t\tbalance=lst[-1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ti.balance=lst[-2]\n\n\t\t\tif i.credit==0 and i.debit==0:\t\t\t\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tbalance=i.balance\n\t\tfor i in self.get('interest_calculation_details'):\n\t\t\tif i.credit==0 and i.debit==0 and i.date>=str(tdate):\n\t\t\t\ti.balance=balance\n\t\t\ti.interest_amount=(i.interest_rate*i.balance)/36500\n\t\t\t\n\t\tfor i in self.get('interest_calculation_details'):\n\n\t\t\tself.total_interest=self.total_interest+i.interest_amount\n\t\t\t\n\t\t\t\n\n\t\t\n\n\n\n","repo_name":"Pradip2113/interest_calculation","sub_path":"interest_calculation/interest_calculation/doctype/interest_amount_calculation/interest_amount_calculation.py","file_name":"interest_amount_calculation.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"12446064809","text":"import mido\nfrom mido import MidiFile\nfrom mido.midifiles import MidiTrack\n\nfrom note import Note\n\n\nclass Song:\n\n def __init__(self):\n self.notes = []\n self.counter = 0\n\n def read_file(self, filename):\n mid = MidiFile(filename)\n\n # An internal clock that keeps track of current time based on the\n # cumulative elapsed time delta of each note\n current_time = 0\n\n elapsed_time = 0\n\n for _, track in enumerate(mid.tracks):\n for message in track:\n # Increment internal clock\n current_time += message.time\n\n current_time\n\n if message.type == 'note_on':\n # Create a new note for this note_on (no time information\n # yet)\n note = Note(\n message.channel, message.note, message.velocity, elapsed_time, current_time)\n\n self.notes.append(note)\n\n elapsed_time = 0\n\n elif message.type == 'note_off':\n end_note = Note(\n message.channel, message.note, message.velocity)\n\n for note in reversed(self.notes):\n if note == end_note and note.duration == None:\n note.add_duration(current_time)\n break\n\n # If we haven't started a new note, we need to increment\n # the elapsed time since the last note\n if message.type != 'note_on':\n\n elapsed_time += message.time\n\n def write_file(self, filename=None):\n with MidiFile() as midi_song:\n unused_notes = []\n current_time = 0\n new_track = MidiTrack()\n new_track.append(mido.Message('program_change', channel=1, program=29, time=5))\n new_track.append(mido.Message('program_change', channel=2, program=30, time=5))\n new_track.append(mido.Message('program_change', channel=3, program=31, time=5))\n new_track.append(mido.Message('program_change', channel=4, program=32, time=5))\n new_track.append(mido.Message('program_change', channel=5, program=33, time=5))\n new_track.append(mido.Message('program_change', channel=6, program=34, time=5))\n new_track.append(mido.Message('program_change', channel=7, program=35, time=5))\n 
new_track.append(mido.Message('program_change', channel=8, program=36, time=5))\n new_track.append(mido.Message('program_change', channel=9, program=37, time=5))\n\n for note in self.notes:\n note.absolute_start = current_time + note.time_delta\n note_start = note.absolute_start\n\n best_end = float('inf')\n best_end_note = None\n\n for unused_note in unused_notes:\n this_end = unused_note.get_absolute_end()\n\n if this_end < best_end:\n best_end = this_end\n best_end_note = unused_note\n\n if best_end < note_start:\n new_track.append(\n best_end_note.get_note_off(best_end - current_time))\n unused_notes.remove(best_end_note) # remove the note that just ended; removing `note` raised ValueError, since `note` was never added\n current_time = best_end\n else:\n new_track.append(note.get_note_on())\n unused_notes.append(note)\n current_time = note_start\n\n midi_song.tracks.append(new_track)\n midi_song.save(filename)\n\n def has_next_note(self):\n return self.counter < len(self.notes)\n\n def get_next_note_as_arff(self, n=5):\n \"\"\"\n Get the next n notes as a line in an arff file\n \"\"\"\n\n array = []\n\n # Bug - This assumes that there are more than n notes\n number_notes = n - max(n - self.counter, 0)\n\n for _ in range(max(n - self.counter, 0)):\n array += [1000, 1000, 1000, -1, -1]\n\n for note_index in reversed(range(number_notes)):\n array += self.notes[self.counter - note_index].get_note_array()\n\n self.counter += 1\n\n return array, self.notes[self.counter].get_note_array()\n\n def get_last_notes(self, n=5):\n\n array = []\n\n if n < len(self.notes):\n number_notes = n\n else:\n number_notes = len(self.notes)\n\n for _ in range(n - number_notes):\n array += [1000, 1000, 1000, -1, -1]\n\n for note_index in range(len(self.notes) - number_notes, len(self.notes)):\n array += self.notes[note_index].get_note_array()\n\n return array\n\n def get_arff_arrays(self):\n\n # TODO: Convert these all to numpy arrays.\n array_instrument = []\n array_note = []\n array_velocity = []\n array_duration = []\n array_time_delta = []\n\n for _ in range(len(self.notes) - 1):\n next_line, output = self.get_next_note_as_arff()\n array_instrument.append(next_line + [output[0]])\n array_note.append(next_line + [output[1]])\n array_velocity.append(next_line + [output[2]])\n array_duration.append(next_line + [output[3]])\n if output[4] != 0:\n array_time_delta.append(next_line + [output[4]])\n\n return array_instrument, array_note, array_velocity, array_duration, array_time_delta\n\n def add_note(self, note):\n self.notes.append(note)\n\n# song = Song()\n# song.read_file('../Rollinginthedeep.mid')\n# song.write_file('../Rollinginthedeep2.mid')\n","repo_name":"chrisranderson/music-generation","sub_path":"meka-technique/song.py","file_name":"song.py","file_ext":"py","file_size_in_byte":5617,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"88"} +{"seq_id":"6298570147","text":"def convert(value: float, fmt: str) -> float:\n \"\"\"Converts the value to the designated format.\n\n :param value: The value to be converted must be numeric or raise a TypeError\n :param fmt: String indicating format to convert to\n :return: Float rounded to 4 decimal places after conversion\n \"\"\"\n if not isinstance(value, (int, float)):\n raise TypeError(\"Input must be a float or an int!\")\n\n if fmt.lower() == \"cm\":\n return round(value * 2.54, 4)\n elif fmt.lower() == \"in\":\n return round(value * 0.39370079, 4)\n else:\n raise ValueError(\"Wrong format 
given!\")\n","repo_name":"pogross/bitesofpy","sub_path":"169/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"2890816282","text":"\nfrom gbahack.gbabin.bytes import ByteArrayReader\nfrom gbahack.resource import ResourceManager\n\nimport json\nimport os\nfrom array import array\n\nclass NoMetaDataException(Exception):\n pass\n\nclass RawFile(ByteArrayReader):\n def __init__(self, file):\n self.file = file\n self.f = None\n self.bytes = []\n self._resetfile()\n \n \n def __getitem__(self, v):\n return self.f[v]\n \n \n def _resetfile(self):\n self.bytes = open(self.file, 'rb').read() #read, adjust, in binary\n \n \n def path(self):\n '''Returns the path of the loaded file'''\n return self.file\n \n \n def trunc(self, offset, length, truncbyte=0xFF):\n '''Clears length bytes at a given offset in the ROM.\n Optional trunc byte can be set.'''\n c = array('B', [truncbyte] * length)\n self.writeArray(offset, c)\n \n \n def writeArray(self, offset, array):\n mf = open(self.file, 'r+b')\n mf.seek(offset)\n mf.write(array)\n mf.close()\n self._resetfile() \n \n \n def write(self, offset, data):\n '''Writes a bblock object to the ROM'''\n self.writeArray(offset, data.toArray())\n \n \n def writeBlocks(self, bblockarray):\n '''Writes a dict of bblocks (accompanied by a offset as index) to the ROM.'''\n for offset in bblockarray:\n self.write(offset, bblockarray[offset])\n \n def size(self):\n return len(self.bytes)\n \n\nclass ROM(RawFile):\n def __init__(self, filename, metadata=None):\n RawFile.__init__(self, filename)\n self.filename = filename\n \n self.metadata = {}\n if metadata != None:\n self.metadata = metadata\n else:\n self.loadMetaData()\n \n self.resourcemanger = ResourceManager(self)\n \n \n def loadMetaData(self):\n self.metadata = {}\n \n #try to find a metadata rom definition\n metafile = None\n if os.path.isfile(self.filename+\".metadata\"):\n metafile = self.filename+\".metadata\"\n elif os.path.isfile(os.path.splitext(self.filename)[0]+\".metadata\"):\n metafile = os.path.splitext(self.filename)[0]+\".metadata\"\n else:\n raise NoMetaDataException(\"No metadata file was found for this ROM!\")\n\n f = open(metafile, 'r')\n try:\n self.metadata = json.loads(f.read())\n except:\n print(\"Invalid metadata file. 
Should be in the JSON format!\")\n raise NoMetaDataException()\n finally:\n f.close()\n \n \n def getRM(self):\n '''Returns the resource manager attached to the ROM.'''\n return self.resourcemanager\n \n \n def getName(self):\n if \"name\" in self.metadata:\n return self.metadata[\"name\"] # metadata is a dict; attribute access raised AttributeError\n else:\n return \"Unknown\"\n","repo_name":"TheUnknownCylon/GBA-Pokemon-Hacking","sub_path":"gbahack/gbabin/rom.py","file_name":"rom.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"88"} +{"seq_id":"31228034923","text":"import 線性回歸3\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import linear_model\r\nfrom sklearn import datasets\r\nlinreg=linear_model.LinearRegression()\r\ndiabetes=線性回歸3.datasets\r\nx_train=線性回歸3.x_train\r\ny_train=線性回歸3.y_train\r\nx_test=線性回歸3.x_test\r\ny_test=線性回歸3.y_test\r\nx0_test=x_test[:,0]\r\nprint(x0_test)\r\nx0_train=x_train[:,0]\r\nx0_test=x0_test[:,np.newaxis] # reshape so that each value counts as a single sample\r\nprint(x0_test)\r\nx0_train=x0_train[:,np.newaxis]\r\nprint('Performing regression analysis on the ten physiological factors')\r\nplt.figure(figsize=(8,15))\r\nfor col in range(0,10):\r\n xi_test=x_test[:,col]\r\n xi_train=x_train[:,col]\r\n xi_test=xi_test[:,np.newaxis]\r\n xi_train=xi_train[:,np.newaxis]\r\n linreg.fit(xi_train,y_train)\r\n y=linreg.predict(xi_test)\r\n plt.subplot(5,2,col+1)\r\n plt.scatter(xi_test,y_test,color='k')\r\n plt.plot(xi_test,y,color='b',linewidth=3)\r\nplt.show()\r\n\r\n","repo_name":"ChenYH1994/Python","sub_path":"Linear_Regression/線性回歸4.py","file_name":"線性回歸4.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"13099210189","text":"import pandas as pd\nimport os\n\ndef list_and_select_csv_files():\n current_dir = os.getcwd()\n csv_files = [file for file in os.listdir(current_dir) if file.endswith('.csv') and file != 'SkyConditionData_cleaned.csv']\n\n if not csv_files:\n print(\"No CSV files found in the current directory.\")\n return None\n\n print(\"Select a CSV file to merge with:\")\n for i, file in enumerate(csv_files):\n print(f\"{i+1}. {file}\")\n\n while True:\n try:\n choice = int(input(\"Enter the number corresponding to the CSV file: \"))\n if 1 <= choice <= len(csv_files):\n selected_file = csv_files[choice - 1]\n return selected_file\n else:\n print(\"Invalid choice. Please enter a valid number.\")\n except ValueError:\n print(\"Invalid input. 
Please enter a number.\")\n\n# Prompt user to select a CSV file\nselected_data = list_and_select_csv_files()\nif selected_data:\n print(f\"You selected: {selected_data}\")\n\n # Read the selected CSV file\n selected_data = pd.read_csv(selected_data)\n\n # Read SkyConditionData_cleaned.csv\n sky_data = pd.read_csv('SkyConditionData_cleaned.csv')\n\n # Convert Date/Time columns to datetime format\n selected_data['Date/Time'] = pd.to_datetime(selected_data['Date/Time'], format=\"%y-%m-%d %I:%M:%S %p\")\n sky_data['Datetime'] = pd.to_datetime(sky_data['Datetime'])\n\n # Round up the datetime to the next hour for selected_data\n selected_data['Date/Time'] = selected_data['Date/Time'].dt.ceil('H')\n\n # Perform the join operation\n merged_data = pd.merge(selected_data, sky_data, left_on='Date/Time', right_on='Datetime', how='inner')\n\n # Drop the 'Datetime' column as it's redundant\n merged_data.drop('Datetime', axis=1, inplace=True)\n\n # Rename the 'Date/Time' column to 'Datetime'\n merged_data.rename(columns={'Date/Time': 'Datetime'}, inplace=True)\n\n # Sort the rows chronologically based on 'Datetime'\n merged_data.sort_values('Datetime', inplace=True)\n\n # Reset the index\n merged_data.reset_index(drop=True, inplace=True)\n\n # Save the merged data to ground_weather_merged.csv\n merged_data.to_csv('ground_weather_merged.csv', index=False)\n\n print(\"Merge completed. Merged data saved to ground_weather_merged.csv.\")\nelse:\n print(\"No CSV file selected.\")\n","repo_name":"Fraolabebe/Team-SkySci-Analytics","sub_path":"Metadata Merge Transform/Merge_Metadata_Transform.py","file_name":"Merge_Metadata_Transform.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"37016586742","text":"class Solution:\n def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:\n lis=[]\n newlis=[]\n for i in range(len(points)):\n lis.append(((points[i][0])**2)+((points[i][1])**2))\n lis.sort()\n maxx = lis[k-1]\n for i in range(len(points)):\n if ((points[i][0])**2)+((points[i][1])**2)<=maxx:\n newlis.append(points[i])\n return newlis\n ","repo_name":"ben-on/A2SV","sub_path":"973. K Closest Points to Origin.py","file_name":"973. 
K Closest Points to Origin.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"13450241878","text":"import tensorflow as tf\nimport numpy as np\nimport keras\nfrom keras.datasets import cifar100\nfrom keras.models import Sequential,Model\nfrom keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D,Input\nfrom keras import backend as K\nfrom keras.callbacks import TensorBoard\nfrom keras.optimizers import SGD,Adam\nfrom keras.applications import VGG16\nimport matplotlib.pylab as plt\n\nbatch_size=100\nnum_classes=100\nepochs=5\n\n(x_train,y_train),(x_test,y_test)= cifar100.load_data()\n\n_,filas, columnas, canales = x_train.shape #32x32x3\n\n# Convert the data type to float32\nx_train=x_train.astype('float32')\nx_test=x_test.astype('float32')\n\n# Normalization\nx_train=x_train/255\nx_test=x_test/255\n\n# Categorize the labels (one-hot encoding)\ny_train=keras.utils.to_categorical(y_train,num_classes)\ny_test=keras.utils.to_categorical(y_test,num_classes)\n\n\nEntradas=Input(shape=(filas,columnas,canales))\nx = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(Entradas)\n#x=Dropout(0.25)(x)\nx = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\nx = MaxPooling2D((2, 2), name='block1_pool')(x)\n\nx = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n#x=Dropout(0.25)(x)\nx = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\nx = MaxPooling2D((2, 2), name='block2_pool')(x)\n\n# Fully connected layers / neural-network head\nx=Flatten()(x)\nx=Dense(512,activation='relu')(x)\nx=Dropout(0.5)(x) # Regularize to avoid overfitting; the layer has 512 neurons and dropout helps prevent overfitting\nx=Dense(num_classes,activation='softmax')(x) # Softmax yields a probability between 0% and 100%\n\nmodelo = Model(inputs=Entradas, outputs=x)\n#modelo.summary()\n\nadam_opt = Adam(lr=0.001,beta_1=0.9,beta_2=0.9) # renamed so the Adam class is not shadowed #SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\n\nmodelo.compile(loss=keras.losses.categorical_crossentropy,optimizer=adam_opt,metrics=['categorical_accuracy'])\n\nhistory=modelo.fit(x_train,y_train,batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(x_test,y_test)) # history keeps the training record: how the metrics varied per epoch.\n\npuntuacion=modelo.evaluate(x_test,y_test,verbose=1)\n\nprint(puntuacion)\n\n# Plotting the training history\nplt.figure(1)\nplt.plot(history.history['categorical_accuracy'])\nplt.plot(history.history['val_categorical_accuracy'])\nplt.title('Model Accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epochs')\nplt.legend(['Training', 'Test'], loc='upper left')\n\n\nplt.figure(2)\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model Loss')\nplt.ylabel('Loss')\nplt.xlabel('Epochs')\nplt.legend(['Training', 'Test'], loc='upper left')\nplt.show()\n","repo_name":"cruz-victor/DeepLearningKerasTensorflow","sub_path":"red_neuronal_convolucional_cifar1000_analisis_grafico.py","file_name":"red_neuronal_convolucional_cifar1000_analisis_grafico.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"29885474689","text":"\"\"\"\nThread synchronised log recorder for FlatTrack\n\"\"\"\nimport datetime\nimport os\nimport traceback\nfrom threading import RLock\n\n\nclass LogContext(object):\n \"\"\"\n A Logger Class\n \"\"\"\n\n 
def __init__(self):\n self.lock = RLock()\n self.count = 0\n self.maxwrite = 3000\n folder = os.path.abspath(os.path.dirname(__file__))\n self.filename = os.path.join(os.path.dirname(folder), \"plugin.log\")\n\n def set_filename(self, filename):\n self.filename = filename\n\n def rotate_log(self):\n \"\"\"\n Rotate the current log file\n :return:\n \"\"\"\n with self.lock:\n try:\n self.count = 0\n filename = self.filename\n moved = filename + \".old\"\n\n if os.path.exists(moved):\n os.unlink(moved)\n if os.path.exists(filename):\n os.rename(filename, moved)\n except Exception as err:\n print(\"rotate_log failed..: {}\".format(err))\n\n self.write(\"Log rotated to {}\".format(moved))\n\n def write(self, message):\n \"\"\"\n Write a log message\n :param message:\n :return:\n \"\"\"\n with self.lock:\n self.count += 1\n\n timestamp = datetime.datetime.utcnow().isoformat()\n try:\n with open(self.filename, \"a\") as logfile:\n logfile.write(timestamp + \" \")\n logfile.write(message)\n logfile.write(\"\\n\")\n except Exception as err:\n print(str(err))\n\n if self.count > self.maxwrite:\n self.rotate_log()\n\n\nLOG = LogContext()\n","repo_name":"inorton/EDMCHits","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"88"} +{"seq_id":"75000925087","text":"from pygame.sprite import Group\n\nclass Settings(object):\n\t\"\"\"docstring for Settings\"\"\"\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.jewels = Group()\n\t\tself.isFirstIteration = True\n\t\tself.screenWidth = 720\n\t\tself.screenHeight = 720\n\t\tself.backgroundColor = (250, 250, 250)\n\n\t\t# Jewel settings\n\t\tself.jewelWidth = 30\n\t\tself.jewelHeight = 20\n\t\tself.jewelVerticalOrHorizontal = 1 # vertical = 0; Horizontal = 1\n\t\tself.colorOfJewels = 4\n\t\tself.jewelType = 1\n\t\tself.jewelSpeedFactor = 20\n\t\tself.jewelsLimit = 4\n\t\tself.jewelDirection = 1 # 1 - moving right i.e. 
value of x-coordinate should increase / -1 corresponds to moving left, therefore x should decrease\n\t\tself.jewelMovingRight = False\n\t\tself.jewelMovingLeft = False\n\t\tself.anyJewelReachedEdge = False\n\t\tself.anyJewelReachedBottom = False\n\t\tself.numberOfJewelsInEachIteration = 0\n\t\tself.allTheJewelsReachedBottom = False\n \n\t\t\n\t\tself.probableXCoordinates = []\n\t\tself.listOfJewels = []\n\t\tself.colorOfTheEmptyRect = (250, 250, 250, 255)\n\n\n\t\t","repo_name":"Jay0505/JewelsGame","sub_path":"JewelSettings.py","file_name":"JewelSettings.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"12976288088","text":"import streamlit as st\nimport numpy as np\nfrom skimage import io\nfrom numpy import asarray\nfrom PIL import Image\nimport PIL\nimport os\nfrom PIL import Image, ImageOps\nimport numpy as np\nimport cv2\nfrom skimage import morphology, exposure\n\nst.title(\"📷 Diamond Extractor 📷\")\n\ndef load_image(image_file):\n img = Image.open(image_file)\n return img\n\n\n\n\ndef main():\n st.subheader(\"Dataset Credit: D360 Tech\")\n #menu = [\"Image\",\"Dataset\",\"DocumentFiles\",\"About\"]\n choice = \"Image\"\n if choice == \"Image\":\n st.subheader(\"Image\")\n image_file = st.file_uploader(\"Upload Your Image\", type=['jpg', 'png', 'jpeg'])\n if not image_file:\n return None\n\n original_image = Image.open(image_file)\n img = np.array(original_image)\n\n xy = cv2.Canny(img, 30, 170, 3)\n xy = cv2.dilate(xy, (2,2), iterations = 3)\n\n h, w = xy.shape[:2]\n mask = np.zeros((h+2, w+2), np.uint8)\n xyc = xy.copy()\n cv2.floodFill(xyc, mask, (0,0), 255.0)\n xyc = cv2.bitwise_not(xyc)\n\n xy = xy | xyc\n\n xy = cv2.medianBlur(xy, 3)\n xy = cv2.GaussianBlur(xy, (0, 0), 1, 1)\n xy = cv2.morphologyEx(xy, cv2.MORPH_CLOSE, kernel = (5,5))\n\n h, w = xy.shape[:2]\n mask = np.zeros((h+2, w+2), np.uint8)\n xyc = xy.copy()\n cv2.floodFill(xyc, mask, (0,0), 255.0)\n xyc = cv2.bitwise_not(xyc)\n\n xy = xy | xyc\n\n c, h = cv2.findContours(image=xy, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)\n sort_contour = sorted(c, key = cv2.contourArea, reverse = True)\n largest_contour = sort_contour[0]\n\n ab = np.empty(xy.shape)\n\n xy = cv2.drawContours(ab, contours = largest_contour, contourIdx = -1, color = 1, thickness = -1)\n xy[xy<0.000001] = 0\n xy[xy > 0.999999] = 255.0\n xy = np.uint8(xy)\n np.unique(xy)\n\n\n h, w = xy.shape[:2]\n mask = np.zeros((h+2, w+2), np.uint8)\n xyc = xy.copy()\n cv2.floodFill(xyc, mask, (0,0), 255.0)\n xyc = cv2.bitwise_not(xyc)\n xy = xy | xyc\n\n # img1 = io.imread(original_image, as_gray=True)\n #img1 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n seg_im = cv2.bitwise_and(img, img, mask = xy)\n\n\n seg_im[seg_im < 35] = 255\n\n\n # xy = cv2.Canny(img, 30, 170, 3)\n # xy = cv2.dilate(xy, (2,2), iterations = 3)\n\n # h, w = xy.shape[:2]\n # mask = np.zeros((h+2, w+2), np.uint8)\n # xyc = xy.copy()\n # cv2.floodFill(xyc, mask, (0,0), 255.0)\n # xyc = cv2.bitwise_not(xyc)\n\n # xy = xy | xyc\n\n # xy = cv2.medianBlur(xy, 3)\n # xy = cv2.GaussianBlur(xy, (0, 0), 1, 1)\n # xy = cv2.morphologyEx(xy, cv2.MORPH_CLOSE, kernel = (5,5))\n\n # h, w = xy.shape[:2]\n # mask = np.zeros((h+2, w+2), np.uint8)\n # xyc = xy.copy()\n # cv2.floodFill(xyc, mask, (0,0), 255.0)\n # xyc = cv2.bitwise_not(xyc)\n\n # xy = xy | xyc\n # seg_im = cv2.bitwise_and(img, img, mask = xy)\n\n\n # seg_im[seg_im < 35] = 255\n\n\n \n image = PIL.Image.fromarray(seg_im, \"RGB\")\n\n 
st.subheader(\"Original uploaded Image :\")\n st.image(original_image,width=400)\n\n st.subheader(\"Output Image [with white background]:\")\n st.image(image,width=400)\n #image = Image.open(image) #Image name\n #fig = plt.figure()\n #plt.imshow(image)\n #plt.axis(\"off\")\n #st.pyplot(fig)\n\n # if image is not None:\n # with open(os.path.join(\"tempDir\",image.name),\"wb\") as f: \n # f.write(image.getbuffer()) \n # st.success(\"Saved File\")\n\n\n # with open(image_file.name, \"rb\") as file:\n # btn = st.download_button(\n # label=\"Download image\",\n # data=file,\n # file_name=image.name,\n # #mime=\"image/png\"\n # )\n # #st.image(load_image(mask))\n\n\nif __name__ == '__main__':\n \tmain()\n\n\n\n","repo_name":"DhruvinK-06/Diamond-Image-Segmentation","sub_path":"Website/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"20764825313","text":"import xss_scanner\r\nimport keylogger\r\nimport sys\r\nimport getopt\r\n\r\nargument_list = sys.argv[1:]\r\noptions = \"hkxf:e:u:\"\r\nlong_options = [\"help\",\"kelogger\",\"xss-scanner\",\"file\",\"email\",\"url\"]\r\n\r\ntry:\r\n arguments, values = getopt.getopt(argument_list, options, long_options)\r\n\r\n for currentArgument, currentValue in arguments:\r\n if currentArgument in (\"-h\",\"--help\"):\r\n print(\" VulnKey \\n\")\r\n print(\"-k or --keylogger - for keylogger\")\r\n print(\"-x or --xss-scanner - for xss scanner\")\r\n print(\"\\nFor the help menu of respective programs, add -h, Example: python vulnkey.py -k -h\")\r\n print(\"Example commands: python vulnkey.py -x -h\")\r\n print(\"Example commands: python vulnkey.py -xss-scanner -u [url]\")\r\n print(\"Example commands: python vulnkey.py -keylogger -e [email]\")\r\n exit()\r\n\r\n if currentArgument in (\"-k\", \"--keylogger\"):\r\n try:\r\n keylogger = keylogger.Keylogger()\r\n keylogger.start()\r\n except KeyboardInterrupt:\r\n exit()\r\n\r\n if currentArgument in (\"-x\",\"--xss-scanner\"):\r\n try:\r\n scanner = xss_scanner.Scanner()\r\n scanner.run()\r\n except KeyboardInterrupt:\r\n print(\"^C\")\r\n exit()\r\n\r\nexcept getopt.error as err:\r\n print(str(err))\r\n exit()\r\n\r\n \r\n","repo_name":"rainbow-hue/VulnKey","sub_path":"vulnkey.py","file_name":"vulnkey.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"88"} +{"seq_id":"8020067996","text":"#import numpy as np\n\nimport cv2\nimport math\nimport numpy as np\nfrom pathlib import Path\nfrom itertools import chain\n\nfrom process_classes.utils import *\nfrom tracker_utils import mkdir_children\n\nclass Alarm_line_Write_to_file(calculation_object):\n def __init__(self, opt, opt_local, parent):#\n super().__init__()\n self.parent = parent\n if parent is None: raise ValueError(\"Несоответствующее значение parent\")\n self.scenario_id = parent.get_scenario_id()\n self.opt_global = opt\n self.opt_local = opt_local\n self.parse_opt()\n \n def update(self, opt_alg):\n alarm_tr = opt_alg[\"alarm\"]\n if len(alarm_tr) > 0:\n self.save_log(alarm_tr)\n return opt_alg\n\n def get_displayed(self, displayed = {}):#\n return displayed\n\n def save_log(self, alarm_tr):\n\n tm_str = \"\"\n if self.LogFileName.exists():\n with open(self.LogFileName) as fr:\n tm_str = fr.read()\n\n for alarm in alarm_tr:\n if alarm[\"scenario id\"] != self.scenario_id: continue\n im_file = \"\"\n if self.SaveEventsImages:\n im_file = 
Path(self.opt_global[\"VideoFile\"])\n im_file = self.ImagesDir / (str(self.LogFileName.stem) + \"_fr\" + str(alarm[\"frames\"]) +\\\n \"_ms\" + str(int(alarm[\"frames\"] * 1000.0 / self.opt_global[\"fps\"])) +\\\n \"_tr\"+ str(alarm[\"tracks id\"]) + \".png\") #(str(im_file.stem) + \"_\" + str(self.LogFileName.stem) + str(alarm[\"tracks id\"]) + \".png\")\n self.save_img(str(im_file), alarm[\"image\"])\n \n tm_str += str(int(alarm[\"frames\"] * 1000.0 / self.opt_global[\"fps\"])) + \";\" + str(im_file) + \"\\n\"\n\n with open(self.LogFileName, 'w') as fw:\n fw.write(tm_str)\n\n def save_img(self, file, im):\n cv2.imwrite(file,im)\n\n def parse_opt(self):#\n self.LogFileName = Path(self.opt_local.get(\"LogFileName\", \"log.test\"))\n delete_file(self.LogFileName)\n with open(self.LogFileName, 'w') as _:\n pass\n self.ImagesDir = Path(self.opt_local.get(\"ImagesDir\", self.LogFileName.parent))\n mkdir_children(self.ImagesDir)\n self.SaveEventsImages = self.opt_local.get(\"SaveEventsImages\", True)","repo_name":"PropovedNik007/train","sub_path":"process_classes/Alarm_line_Write_to_file.py","file_name":"Alarm_line_Write_to_file.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"71655612767","text":"import logging\n\nfrom telegram.ext import CommandHandler\n\nfrom bot.constants import MINUTE\nfrom bot.utils import datetime_from_answer, init_reminder_context, _setup_reminder_and_reply\n\nlogger = logging.getLogger(__name__)\n\ndef quicky(update, context):\n user_offset = context.user_data.get('offset')\n msg = update.message\n\n if not user_offset:\n msg.reply_text('Please first set your current time with /setmytime')\n return\n if not context.args:\n msg.reply_text(\"Mmm not like that\\n/q buy something, 20\")\n return\n\n to_remind, sep, minutes = ' '.join(context.args).rpartition(',')\n if not to_remind:\n msg.reply_text(\"Please add a *comma* and delay time. i.e /q charge phone*,* 20\", parse_mode='markdown')\n return\n try:\n requested_delay = int(minutes.strip()) * MINUTE\n except ValueError:\n msg.reply_text(\"Delay must be in minutes. 
i.e 60\")\n return\n\n when = datetime_from_answer(requested_delay)\n job_context = init_reminder_context(\n to_remind, msg.from_user, msg.chat_id, user_offset, remind_date_iso=when.isoformat()\n )\n logger.info('Setting up a new reminder')\n try:\n _setup_reminder_and_reply(update, context.job_queue, job_context, when)\n except Exception:\n logger.exception('Error writing reminder')\n msg.reply_text(\"I'm not perfect ¯\\\\_(ツ)_/¯\")\n\nquick_reminder = CommandHandler('q', quicky)\n","repo_name":"Ambro17/RemindersBot","sub_path":"bot/handlers/quick.py","file_name":"quick.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"88"} +{"seq_id":"70509730848","text":"#!/usr/bin/env python3\n'''\nImplement String#ipv4_address?, which should return true if given object is an IPv4 address - four numbers (0-255) separated by dots.\n\nIt should only accept addresses in canonical representation, so no leading 0s, spaces etc.\n'''\ndef ipv4_address(address):\n a = address.split('.')\n if len(a) != 4:\n return False\n for x in a:\n try:\n if not x.isdigit():\n return False\n if int(x) < 0 or int(x) > 255:\n return False\n if str(int(x)) != x:\n return False\n except:\n return False\n return True\n","repo_name":"k-unker/codewars_katas","sub_path":"is_it_ipv4_address.py","file_name":"is_it_ipv4_address.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"7028340247","text":"from domain.car import Car\nfrom domain.car_wash import CarWash\nfrom service.service import Service\nfrom repository.repo_car_wash import RepoCarWash\nfrom repository.repo_car import RepoCar\n\n\ndef test_observer():\n car_list = RepoCar()\n car_wash_list = RepoCarWash()\n car = Car(1, \"AG 67 NIG\", \"Radu\")\n car_wash = CarWash(1, \"Geani's\")\n car.add_observer(car_wash)\n car_list.store(car)\n assert len(car_list.get_all()) == 1\n car_wash_list.store(car_wash)\n car_wash_list.get(1).add_car(car.get_id())\n assert len(car_wash_list.get(1).get_cars()) == 1\n car_list.delete(car.get_id())\n assert len(car_list.get_all()) == 0\n assert len(car_wash_list.get(1).get_cars()) == 0\n\n\n","repo_name":"radu9917/Car_Wash","sub_path":"tests/test_observer.py","file_name":"test_observer.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"2035323713","text":"from typing import List, Tuple, Text, Optional\n\nsign = lambda x: 1 if x > 0 else (-1 if x < 0 else 0)\n\nDIRECTION_MAP = {\"U\": (0, 1), \"D\": (0, -1), \"R\": (1, 0), \"L\": (-1, 0)}\n\n\ndef rope_positions(\n moves: List[Tuple[Text, int]], tail_length: Optional[int] = 1\n) -> int:\n \"\"\"\n Partial solution taken from: https://github.com/nthistle/advent-of-code/blob/master/2022/day09/day09_p2.py\n \"\"\"\n tail_positions = set()\n snake_size = tail_length + 1\n snake = [[50, 50] for _ in range(snake_size)]\n tail_positions.add(tuple(snake[-1]))\n for direction, distance in moves:\n for _ in range(distance):\n delta_x, delta_y = DIRECTION_MAP[direction]\n snake[0][0] += delta_x\n snake[0][1] += delta_y\n for i in range(1, len(snake)):\n head_x, head_y = snake[i - 1]\n tail_x, tail_y = snake[i]\n delta_x = tail_x - head_x\n delta_y = tail_y - head_y\n if (abs(delta_x), abs(delta_y)) == (1, 1):\n continue\n elif delta_x == 0 or delta_y == 0:\n if abs(delta_x) >= 2:\n snake[i][0] -= sign(delta_x)\n if 
abs(delta_y) >= 2:\n snake[i][1] -= sign(delta_y)\n else:\n snake[i][0] -= sign(delta_x)\n snake[i][1] -= sign(delta_y)\n tail_positions.add(tuple(snake[-1]))\n return len(tail_positions)\n","repo_name":"stuart-bradley/advent_of_code","sub_path":"year_2022/challenge_9/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"38402170638","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nfrom hashlib import sha256\nfrom os import environ as ENV\n\nfrom shrine.init import WORKING_DIR\n\nPROJECT_PATH = lambda *path: os.path.join(WORKING_DIR, *path)\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n)\n\nMANAGERS = ADMINS\n\nPORT = int(ENV.get(\"PORT\", 8000))\nPRODUCTION = False\n\nDATABASES = {\n}\n\nSESSION_ENGINE = 'django.contrib.sessions.backends.db'\n\nSECRET_KEY = sha256(os.getenv('PATH')).hexdigest()\n\nTIME_ZONE = \"America/New_York\"\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = \"en-us\"\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n\nROOT_URLCONF = \"shrine.routes\"\n\n# Python dotted path to the WSGI application used by Django\"s runserver.\nWSGI_APPLICATION = \"shrine.routes.wsgi\"\n\nTEMPLATE_PATH = PROJECT_PATH(\"templates\")\n\nINSTALLED_APPS = (\n \"shrine.apps.core\",\n)\nBROKER_BACKEND = \"django\"\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"filters\": {\n \"require_debug_false\": {\n \"()\": \"django.utils.log.RequireDebugFalse\"\n }\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"INFO\",\n \"class\": \"logging.StreamHandler\",\n },\n \"mail_admins\": {\n \"level\": \"ERROR\",\n \"filters\": [\"require_debug_false\"],\n \"class\": \"django.utils.log.AdminEmailHandler\"\n }\n },\n \"loggers\": {\n \"django.request\": {\n \"handlers\": [\"mail_admins\"],\n \"level\": \"ERROR\",\n \"propagate\": True,\n },\n \"django.request\": {\n \"handlers\": [\"mail_admins\"],\n \"level\": \"ERROR\",\n \"propagate\": True,\n },\n\n }\n}\n\nEMAIL_BACKEND = \"django.core.mail.backends.filebased.EmailBackend\"\nMAILGUN_ACCESS_KEY = \"some-invalid-key\"\nMAILGUN_SERVER_NAME = \"some-invalid-server-name\"\n\nEMAIL_FILE_PATH = PROJECT_PATH(\".messages\")\nTORNADO_CONFIG = {}\nFORCE_TRACEBACK = False\n","repo_name":"gabrielfalcao/shrine","sub_path":"shrine/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"11603820434","text":"from flask import current_app, g\n\ndef set_page_title(title=None, prefix=None, suffix=None, separator='-', separate_with_spaces=True):\n\tif not title:\n\t\ttitle = current_app.config['TITLE']\n\tif separate_with_spaces:\n\t\tseparator = ' ' + separator + ' '\n\tif prefix:\n\t\ttitle = '{prefix}{separator}{title}'.format(prefix=prefix, separator=separator, title=title)\n\tif suffix:\n\t\ttitle = '{title}{separator}{suffix}'.format(title=title, separator=separator, suffix=suffix)\n\tg.title = title\n\treturn g.title\n\ndef get_page_title():\n\tif g.get('title'):\n\t\treturn g.title\n\treturn current_app.config['TITLE']\n","repo_name":"steven-mercatante/Flask-sandbox","sub_path":"app/helpers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"36859420735","text":"import cv2\nimport face_recognition\n\nehsan = face_recognition.load_image_file('/home/python/PycharmProjects/OpenCV/tutorial/face data set/alikhani1.jpeg')\nehsan = cv2.cvtColor(ehsan,cv2.COLOR_BGR2RGB)\namin = face_recognition.load_image_file('/home/python/PycharmProjects/OpenCV/tutorial/face data set/amin1.jpg')\namin = cv2.cvtColor(amin,cv2.COLOR_BGR2RGB)\nroya = face_recognition.load_image_file('/home/python/PycharmProjects/OpenCV/tutorial/face data set/roya1.jpg')\nroya = cv2.cvtColor(roya,cv2.COLOR_BGR2RGB)\nTest = face_recognition.load_image_file('/home/python/PycharmProjects/OpenCV/tutorial/face data set/asrjadid.jpg')\nTest = cv2.cvtColor(Test,cv2.COLOR_BGR2RGB)\n \nfaceLocehsan= face_recognition.face_locations(ehsan)[0]\nencodeehsan = face_recognition.face_encodings(ehsan)[0]\ncv2.rectangle(ehsan,(faceLocehsan[3],faceLocehsan[0]),(faceLocehsan[1],faceLocehsan[2]),(255,0,255),2)\n \nfaceLocamin= face_recognition.face_locations(amin)[0]\nencodeamin = face_recognition.face_encodings(amin)[0]\ncv2.rectangle(amin,(faceLocamin[3],faceLocamin[0]),(faceLocamin[1],faceLocamin[2]),(255,0,255),2)\n\nfaceLocroya= 
face_recognition.face_locations(roya)[0]\nencoderoya = face_recognition.face_encodings(roya)[0]\ncv2.rectangle(roya,(faceLocroya[3],faceLocroya[0]),(faceLocroya[1],faceLocroya[2]),(255,0,255),2)\n\nfaceLocTest= face_recognition.face_locations(Test)[0]\nencodeTest = face_recognition.face_encodings(Test)[0]\ncv2.rectangle(Test,(faceLocTest[3],faceLocTest[0]),(faceLocTest[1],faceLocTest[2]),(255,0,255),2)\n\nresults = face_recognition.compare_faces([encodeehsan,encodeamin,encoderoya],encodeTest)\nfaceDis = face_recognition.face_distance([encodeehsan,encodeamin,encoderoya],encodeTest)\nprint(results,faceDis)\ncv2.putText(ehsan,f'{results[0]} {round(faceDis[0],2)}',(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)\ncv2.putText(amin,f'{results[1]} {round(faceDis[1],2)}',(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)\ncv2.putText(roya,f'{results[2]} {round(faceDis[2],2)}',(50,50),cv2.FONT_HERSHEY_COMPLEX,1,(0,0,255),2)\n \ncv2.imshow('ehsan',ehsan)\ncv2.imshow('amin',amin)\ncv2.imshow('roya',roya)\ncv2.imshow('test',Test)\ncv2.waitKey(0)\n","repo_name":"HadiSArab/OpenCV","sub_path":"face recognition/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"88"} +{"seq_id":"27650515088","text":"# -*- coding: utf-8 -*-\n\n\n\nfrom dal import autocomplete\nfrom django.apps import apps\nfrom django.contrib.auth.decorators import login_required\nfrom django.db.models import Q, TextField\nfrom django.db.models.expressions import RawSQL\nfrom django.db.models.functions import Cast\nfrom django.shortcuts import render\n\nfrom .models import Pessoa, ContaBancaria\n\n\n# Create your views here.\n\nclass PessoaAutocomplete(autocomplete.Select2QuerySetView):\n pessoa = \"A\" # Both person types\n\n def get_queryset(self):\n # Don't forget to filter out results depending on the visitor !\n # if not self.request.user.is_authenticated:\n # return Pessoa.objects.none()\n\n query = \"\"\"SELECT pf.cpf FROM cliente_pessoafisica AS pf WHERE pf.pessoa_ptr_id = cliente_pessoa.id\n UNION \n SELECT pj.cnpj FROM cliente_pessoajuridica AS pj WHERE pj.pessoa_ptr_id=cliente_pessoa.id\"\"\"\n\n qs = Pessoa.objects.annotate(cpfcnpj=RawSQL(query, ()))\n\n if self.pessoa != 'A':\n # qs = Pessoa.objects.filter(tipo=self.pessoa)\n qs = qs.filter(tipo=self.pessoa)\n\n if self.q:\n fil = Q(nome__icontains=self.q) | Q(cpfcnpj__icontains=self.q)\n if self.q.isdigit():\n fil |= Q(id=self.q)\n qs = qs.filter(fil)\n\n return qs.order_by('nome')\n\n\n# @login_required\nclass ContaBancariaAutocomplete(autocomplete.Select2QuerySetView):\n def get_queryset(self):\n # if not self.request.user.is_authenticated:\n # return Pessoa.objects.none()\n\n beneficiario = self.forwarded.get('beneficiario', None)\n\n if not beneficiario:\n return ContaBancaria.objects.all()\n\n qs = ContaBancaria.objects.annotate(s_banco=Cast('banco__pk', TextField()),\n s_agencia=Cast('agencia', TextField()),\n s_conta=Cast('conta', TextField()))\n\n qs = qs.filter(pessoa__pk=beneficiario)\n\n if self.q:\n fil = Q(s_banco__icontains=self.q) | Q(s_agencia__icontains=self.q) | Q(s_conta__icontains=self.q)\n qs = qs.filter(fil)\n\n return qs.order_by('banco__pk', 'agencia', 'conta')\n\n\n@login_required\n# @permission_required('common.view_municipio', raise_exception=True)\ndef municipios_ajax(request, uf, app_label, object_name):\n \"\"\"\n Ajax view for municipality lookup, used in the table-format list.\n \"\"\"\n # 
/common/municipio/ajax/GO/common/Municipio/ => uf=GO app_label=cliente, object_name=Municipio\n model_cls = apps.get_model(app_label, object_name) # apps.get_model(cliente, Municipio)\n municipio_list = model_cls.objects.filter(Q(uf=uf)).order_by('nome')\n return render(request, \"%s/municipios_options.html\" % app_label, {\"municipio_list\": municipio_list})\n\n\n@login_required\n# @permission_required('common.add_cep', raise_exception=True)\ndef cep_ajax(request, pk, app_label, object_name):\n \"\"\"\n Ajax view for CEP (postal code) lookup, used in the table-format list.\n \"\"\"\n # /common/cep/ajax/21020-122/common/CEP/ => uf=GO app_label=common, object_name=Municipio\n model_cls = apps.get_model(app_label, object_name) # apps.get_model(common, Municipio)\n cep_list = model_cls.objects.filter(Q(pk=pk)).order_by('pk')\n return render(request, \"%s/cep_options.html\" % app_label, {\"cep_list\": cep_list})\n\n\n@login_required\n# @permission_required('common.view_municipio', raise_exception=True)\ndef conta_bancaria_ajax(request, pk, app_label, object_name):\n \"\"\"\n Ajax view for bank-account lookup, used in the table-format list.\n \"\"\"\n # /cliente/contaBancaria/ajax/1/imovel/Beneficiario/ => beneficiario=1 app_label=imovel, object_name=Beneficiario\n model_cls = apps.get_model(app_label, object_name) # apps.get_model(imovel, Beneficiario)\n conta_bancaria_list = model_cls.objects.filter(Q(pessoa_id=pk)).order_by('banco')\n return render(request, \"%s/conta_bancaria_options.html\" % app_label,\n {\"conta_bancaria_list\": conta_bancaria_list})\n\n","repo_name":"waslucena/catgree","sub_path":"cliente/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"88"} +{"seq_id":"11581111454","text":"\nfrom sys import argv\ndef multiply(a, b):\n product = []\n for i in a:\n for j in b:\n if isinstance(i, list):\n if i[-1] >= j:\n continue\n product.append(i+[j])\n else:\n if i >= j:\n continue\n product.append([i, j])\n return [product]\n\ndef get_num(one):\n case, word = one.split(',')\n if not (case and word):\n print(0)\n return\n if len(case) < len(word):\n print(0)\n return\n if len(case) == len(word):\n if case == word:\n print(1)\n else:\n print(0)\n return\n occurrences = []\n for ch in word:\n indexes = [i for i, x in enumerate(case) if x == ch]\n occurrences.append(indexes)\n while len(occurrences) >= 2:\n occurrences = multiply(occurrences[0], occurrences[1]) + occurrences[2:]\n print(len(occurrences[0]))\n\n\nf = open(argv[1], 'r')\nfor one in f:\n if one != '\\n':\n get_num(one[:-1])\nf.close()\n\n\n\n\n","repo_name":"thinker3/py_learn","sub_path":"codeeval/subsequence.py","file_name":"subsequence.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"88"} +{"seq_id":"74454639983","text":"# Slip5_2B\r\ndef fibonacciGenerator():\r\n a=0\r\n b=1\r\n for i in range(6):\r\n yield b\r\n a,b= b,a+b\r\n\r\ndef main():\r\n obj = fibonacciGenerator()\r\n n = int(input())\r\n for i in range(n):\r\n try:\r\n print(next(obj))\r\n except Exception as e:\r\n print(e)\r\n return\r\n\r\nif __name__==\"__main__\":\r\n main()","repo_name":"barry-gadhiya/Java-Python","sub_path":"slip5_2B.py","file_name":"slip5_2B.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"22346292447","text":"\nimport 
cube\nimport fmt\nimport fileinput\n\n# Transform problem using steps read from fileinput.\n# Print the cube configuration after each step.\n\n# This is useful if you solve a rubiks equivalent problem\n# with a rubiks solver, and you want to manipulate the\n# sudoku cube without getting lost in the middle.\n\nproblem = (\n 6, 2, 1, 8, 1, 3, 5, 9, 3,\n 8, 6, 6, 4, 3, 9, 5, 7, 3, 7, 8, 4,\n 9, 7, 9, 2, 6, 3, 4, 4, 2, 6, 1, 8,\n 7, 7, 2, 2, 8, 7, 9, 2, 9, 6, 5, 5,\n 4, 5, 1, 4, 5, 1, 8, 3, 1)\n\ntransform_dict = dict(cube.turns)\n\nprint(fmt.fmt_cube(problem))\nfor line in fileinput.input():\n line = line.strip()\n trans = transform_dict[line]\n problem = cube.turn(problem, trans)\n print(line)\n print(fmt.fmt_cube(problem))\n\n","repo_name":"allenbh/rubiks_sudoku","sub_path":"walk.py","file_name":"walk.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"1813788671","text":"nums = [0,1,2,2,3,0,4,2]\nval = 2\n\nleft = 0\n\nfor r in range(len(nums)):\n if nums[r] != val:\n nums[left] = nums[r]\n left += 1\n\nprint(left)","repo_name":"mehedi-iut/leetcode","sub_path":"array/RemoveElement.py","file_name":"RemoveElement.py","file_ext":"py","file_size_in_byte":153,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"28916542208","text":"import matplotlib.pyplot as plt\n\nimport pytest\n\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy.io import fits\nfrom astropy.tests.helper import remote_data\n\nfrom beast.plotting import plot_indiv_fit, plot_cmd, plot_cmd_with_fits, plot_filters\nfrom beast.tests.helpers import download_rename\n\nplt.switch_backend(\"agg\")\n\n\n@remote_data\n@pytest.mark.mpl_image_compare(tolerance=25)\ndef test_indiv_plot():\n\n # download cached version of fitting results\n stats_fname_cache = download_rename(\"beast_example_phat_stats.fits\")\n pdf1d_fname_cache = download_rename(\"beast_example_phat_pdf1d.fits\")\n\n starnum = 0\n\n # read in the stats\n stats = Table.read(stats_fname_cache)\n # open 1D PDF file\n pdf1d_hdu = fits.open(pdf1d_fname_cache)\n\n filters = [\n \"HST_WFC3_F275W\",\n \"HST_WFC3_F336W\",\n \"HST_ACS_WFC_F475W\",\n \"HST_ACS_WFC_F814W\",\n \"HST_WFC3_F110W\",\n \"HST_WFC3_F160W\",\n ]\n waves = np.asarray(\n [\n 2722.05531502,\n 3366.00507206,\n 4763.04670013,\n 8087.36760191,\n 11672.35909295,\n 15432.7387546,\n ]\n )\n\n fig, ax = plt.subplots(figsize=(8, 8))\n\n # make the plot!\n plot_indiv_fit.plot_beast_ifit(filters, waves, stats, pdf1d_hdu, starnum)\n\n return fig\n\n\n@remote_data\n@pytest.mark.mpl_image_compare(tolerance=10)\ndef test_plot_cmd():\n\n # Download example data from phat_small\n fitsfile = download_rename(\"b15_4band_det_27_A.fits\")\n\n # Plot CMD using defaults\n fig = plot_cmd.plot_cmd(fitsfile, show_plot=False)\n\n return fig\n\n\n@remote_data\n@pytest.mark.mpl_image_compare(tolerance=55)\ndef test_plot_cmd_with_fits():\n\n # Download example data from phat_small\n fitsfile = download_rename(\"b15_4band_det_27_A.fits\")\n\n # Download BEAST fits to example data\n beast_fitsfile = download_rename(\"beast_example_phat_stats.fits\")\n\n # Plot CMD using defaults\n fig = plot_cmd_with_fits.plot(fitsfile, beast_fitsfile)\n\n return fig\n\n\n@remote_data\n@pytest.mark.mpl_image_compare(tolerance=18)\ndef test_plot_filters():\n\n filter_names = [\n \"HST_WFC3_F225W\",\n \"HST_WFC3_F275W\",\n \"HST_WFC3_F336W\",\n \"HST_ACS_WFC_F475W\",\n 
\"HST_ACS_WFC_F550M\",\n \"HST_ACS_WFC_F814W\",\n \"HST_WFC3_F110W\",\n \"HST_WFC3_F160W\",\n ]\n\n filters = download_rename(\"filters.hd5\")\n\n # Plot filters using above arguments (the defaults)\n fig = plot_filters.plot_filters(filter_names, filterLib=filters, show_plot=False)\n\n return fig\n","repo_name":"giadapastorelli/beast","sub_path":"beast/plotting/tests/test_plots.py","file_name":"test_plots.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"33408804546","text":"\"\"\"Nox configuration.\"\"\"\n\nfrom __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom textwrap import dedent\n\nimport nox\n\ntry:\n from nox_poetry import Session, session\nexcept ImportError:\n message = f\"\"\"\\\n Nox failed to import the 'nox-poetry' package.\n Please install it using the following command:\n {sys.executable} -m pip install nox-poetry\"\"\"\n raise SystemExit(dedent(message)) from None\n\n\npackage = \"sensopy\"\npython_versions = [\"3.10\", \"3.9\", \"3.8\"]\nlocations = \"src\", \"tests\", \"noxfile.py\"\nnox.options.sessions = (\"tests\",)\n\n\n@session(python=python_versions)\ndef tests(session: Session) -> None:\n \"\"\"Execute pytest tests.\"\"\"\n session.install(\".\")\n session.install(\"coverage[toml]\", \"pytest\")\n try:\n session.run(\"coverage\", \"run\", \"--parallel\", \"-m\", \"pytest\", *session.posargs)\n finally:\n if session.interactive:\n session.notify(\"coverage\", posargs=[])\n\n\n@session(python=python_versions[0])\ndef coverage(session: Session) -> None:\n \"\"\"Upload coverage data.\"\"\"\n args = session.posargs or [\"report\"]\n\n session.install(\"coverage[toml]\")\n\n if not session.posargs and any(Path().glob(\".coverage.*\")):\n session.run(\"coverage\", \"combine\")\n\n session.run(\"coverage\", *args)\n","repo_name":"edgarrmondragon/SensoPy","sub_path":"noxfile.py","file_name":"noxfile.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"3465195892","text":"\"\"\"Define a base object for interacting with the Eufy camera API.\"\"\"\nfrom datetime import datetime\nimport logging\nfrom typing import Dict, Optional\n\nfrom aiohttp import ClientSession\nfrom aiohttp.client_exceptions import ClientError\n\nfrom .device import Device, DeviceDict, StationDict\nfrom .errors import InvalidCredentialsError, RequestError, raise_error\nfrom .param import Params\n\n_LOGGER: logging.Logger = logging.getLogger(__name__)\n\nAPI_BASE: str = \"https://mysecurity.eufylife.com/api/v1\"\n\n\nclass API: # pylint: disable=too-many-instance-attributes\n \"\"\"Define the API object.\"\"\"\n\n def __init__(self, email: str, password: str, websession: ClientSession) -> None:\n \"\"\"Initialize.\"\"\"\n self._api_base: str = API_BASE\n self._email: str = email\n self._password: str = password\n self._retry_on_401: bool = False\n self._session: ClientSession = websession\n self._token: Optional[str] = None\n self._token_expiration: Optional[datetime] = None\n self.devices: DeviceDict = DeviceDict(self)\n self.stations: StationDict = StationDict(self)\n\n @property\n def cameras(self) -> Dict[str, Device]:\n \"\"\"Return a dictionary of cameras. 
Deprecated.\"\"\"\n return {sn: device for sn, device in self.devices.items() if device.is_camera}\n\n async def async_authenticate(self) -> None:\n \"\"\"Authenticate and get an access token.\"\"\"\n auth_resp = await self.request(\n \"post\",\n \"passport/login\",\n json={\"email\": self._email, \"password\": self._password},\n )\n data = auth_resp[\"data\"]\n\n self._retry_on_401 = False\n self._token = data[\"auth_token\"]\n self._token_expiration = datetime.fromtimestamp(data[\"token_expires_at\"])\n domain = data.get(\"domain\")\n if domain:\n self._api_base = f\"https://{domain}/v1\"\n _LOGGER.info(\"Switching to another API_BASE: %s\", self._api_base)\n\n async def async_get_history(self) -> dict:\n \"\"\"Get the device's history.\"\"\"\n history_resp = await self.request(\"post\", \"event/app/get_all_history_record\")\n return history_resp[\"data\"]\n\n async def async_update_device_info(self) -> None:\n \"\"\"Get the latest device info.\"\"\"\n devices_resp = await self.request(\"post\", \"app/get_devs_list\")\n self.devices.update(devices_resp[\"data\"])\n\n stations_resp = await self.request(\"post\", \"app/get_hub_list\")\n self.stations.update(stations_resp[\"data\"])\n\n async def async_set_params(self, device: Device, data: dict) -> None:\n \"\"\"Set device parameters.\"\"\"\n params = Params()\n params.update(data)\n serialized_params = [param.param_info for param in params]\n\n if device.is_station:\n await self.request(\n \"post\",\n \"app/upload_hub_params\",\n json={\n \"station_sn\": device.station_serial,\n \"params\": serialized_params,\n },\n )\n else:\n await self.request(\n \"post\",\n \"app/upload_devs_params\",\n json={\n \"device_sn\": device.serial,\n \"station_sn\": device.station_serial,\n \"params\": serialized_params,\n },\n )\n\n async def async_start_stream(self, device: Device) -> str:\n \"\"\"Start the device stream and return the RTSP URL.\"\"\"\n start_resp = await self.request(\n \"post\",\n \"web/equipment/start_stream\",\n json={\n \"device_sn\": device.serial,\n \"station_sn\": device.station_serial,\n \"proto\": 2,\n },\n )\n\n return start_resp[\"data\"][\"url\"]\n\n async def async_stop_stream(self, device: Device) -> None:\n \"\"\"Stop the device stream.\"\"\"\n await self.request(\n \"post\",\n \"web/equipment/stop_stream\",\n json={\n \"device_sn\": device.serial,\n \"station_sn\": device.station_serial,\n \"proto\": 2,\n },\n )\n\n async def request(\n self,\n method: str,\n endpoint: str,\n *,\n headers: Optional[dict] = None,\n json: Optional[dict] = None,\n ) -> dict:\n \"\"\"Make a request the API.com.\"\"\"\n if self._token_expiration and datetime.now() >= self._token_expiration:\n _LOGGER.info(\"Access token expired; fetching a new one\")\n self._token = None\n self._token_expiration = None\n await self.async_authenticate()\n\n url: str = f\"{self._api_base}/{endpoint}\"\n\n if not headers:\n headers = {}\n if self._token:\n headers[\"x-auth-token\"] = self._token\n\n async with self._session.request(\n method, url, headers=headers, json=json\n ) as resp:\n try:\n resp.raise_for_status()\n data: dict = await resp.json(content_type=None)\n\n if not data:\n raise RequestError(f\"No response while requesting {endpoint}\")\n\n raise_error(data)\n\n return data\n except ClientError as err:\n if \"401\" in str(err):\n if self._retry_on_401:\n raise InvalidCredentialsError(\"Token failed multiple times\")\n\n self._retry_on_401 = True\n await self.async_authenticate()\n return await self.request(\n method, endpoint, headers=headers, 
json=json\n )\n raise RequestError(\n f\"There was an unknown error while requesting {endpoint}: {err}\"\n ) from None\n\n\nasync def async_login(email: str, password: str, websession: ClientSession) -> API:\n \"\"\"Return an authenticated API object.\"\"\"\n api: API = API(email, password, websession)\n await api.async_authenticate()\n await api.async_update_device_info()\n return api\n","repo_name":"FuzzyMistborn/python-eufy-security","sub_path":"eufy_security/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6103,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"91"} +{"seq_id":"24681806850","text":"import json\nimport pandas as pd\n\ntweets_data_path = './twitter_data.txt'\n\ntweets_data = []\ntweets_file = open(tweets_data_path, \"r\")\nfor line in tweets_file:\n try:\n tweet = json.loads(line)\n tweets_data.append(tweet)\n except:\n continue\n\t\t\n\t\t\ntweets = pd.DataFrame()\n\ntweet_text = []\n\nfor i in range(len(tweets_data)):\n try:\n string = tweets_data[i]['text']\n tweet_text.append(string)\n except:\n continue\n\t\t\ntweets['tweet_text'] = tweet_text\n\ntweets.to_csv('./data/tweets_indonesia_unseen.csv', encoding = 'utf-8')","repo_name":"rajaswa/Disaster-Management-","sub_path":"scripts/twitter_stream2.py","file_name":"twitter_stream2.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"42750493892","text":"import gym\nfrom gym.envs.registration import register\n\n\ndef make(\n domain_name,\n task_name,\n seed=1,\n visualize_reward=True,\n from_pixels=False,\n height=84,\n width=84,\n camera_id=0,\n frame_skip=1,\n episode_length=1000,\n is_distracting_cs=None,\n distracting_cs_intensity=None,\n background_dataset_paths=None,\n environment_kwargs=None,\n setting_kwargs=None,\n time_limit=None,\n channels_first=True\n):\n if is_distracting_cs:\n env_id = 'dmc_%s_%s-%s-v1' % (domain_name, task_name, 'dcs')\n else:\n env_id = 'dmc_%s_%s-v1' % (domain_name, task_name)\n\n if from_pixels:\n assert not visualize_reward, 'cannot use visualize reward when learning from pixels'\n\n # shorten episode length\n max_episode_steps = (episode_length + frame_skip - 1) // frame_skip\n time_limit = 1e6\n\n if not env_id in gym.envs.registry.env_specs:\n task_kwargs = {}\n if seed is not None:\n task_kwargs['random'] = seed\n if time_limit is not None:\n task_kwargs['time_limit'] = time_limit\n register(\n id=env_id,\n entry_point='dmc2gym.wrappers:DMCWrapper',\n kwargs=dict(\n domain_name=domain_name,\n task_name=task_name,\n task_kwargs=task_kwargs,\n environment_kwargs=environment_kwargs,\n setting_kwargs=setting_kwargs,\n visualize_reward=visualize_reward,\n from_pixels=from_pixels,\n height=height,\n width=width,\n camera_id=camera_id,\n frame_skip=frame_skip,\n is_distracting_cs=is_distracting_cs,\n distracting_cs_intensity=distracting_cs_intensity,\n background_dataset_paths=background_dataset_paths,\n channels_first=channels_first,\n ),\n max_episode_steps=max_episode_steps,\n )\n return gym.make(env_id)\n","repo_name":"nicklashansen/dmcontrol-generalization-benchmark","sub_path":"src/env/dmc2gym/dmc2gym/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":138,"dataset":"github-code","pt":"91"} +{"seq_id":"19337427997","text":"from http.server import BaseHTTPRequestHandler\nfrom datetime import datetime\nfrom urllib import parse 
\nimport platform\n\n\n\nclass handler(BaseHTTPRequestHandler):\n\n def do_GET(self):\n s= self.path\n url_components = parse.urlparse(s)\n query_string = parse.parse_qsl(url_components.query)\n dic=dict(query_string)\n name= dic.get('name')\n if name:\n message = f'Hello, {name}!'\n else:\n message = 'Hello, Stranger!' \n #message += f\"\\n Greetings from {self.server.server_address[1]} at {str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))} \\n\"\n message += f\"\\n Greetings from {platform.python_version()} at {str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))} \\n\"\n\n self.send_response(200)\n self.send_header('Content-type', 'text/plain')\n self.end_headers()\n self.wfile.write(str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')).encode())\n \n\n\n self.wfile.write(message.encode())\n\n\n\n # def do_GET(self):\n # self.send_response(200)\n # self.send_header('Content-type', 'text/plain')\n # self.end_headers()\n # self.wfile.write(str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')).encode())\n\n return\n ","repo_name":"GhaidaMomani/serverless","sub_path":"api/date.py","file_name":"date.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"22650676627","text":"import time\r\nimport pyautogui\r\nimport random\r\n\r\nimport window_func\r\nimport img_match\r\nimport delay\r\n\r\n\r\ndef clickRandomPos(pos, x_move_max, y_move_max):\r\n x_move = random.randint(0, x_move_max)\r\n y_move = random.randint(0, y_move_max)\r\n pyautogui.moveTo(pos[0] + x_move, pos[1] + y_move)\r\n delay.standard_delay(0.1)\r\n pyautogui.click()\r\n\r\ndef clickAdventureButton():\r\n # bottom \"Adventure\" button\r\n pos = img_match.findAdventureButton()\r\n\r\n clickRandomPos((pos[0] + 20, pos[1] + 10), 160, 30)\r\n delay.random_float_delay(0.3, 0.2)\r\n\r\ndef clickPrincessArena():\r\n # \"Princess Arena\" button\r\n pos = img_match.findPrincessArenaButton()\r\n\r\n clickRandomPos((pos[0] + 20, pos[1] + 10), 270, 90)\r\n delay.random_float_delay(0.3, 0.2)\r\n\r\ndef clickChangeTeamButton():\r\n # \"Defense Settings\" button\r\n pos = img_match.findChangeTeamButton()\r\n\r\n clickRandomPos((pos[0] + 20, pos[1] + 10), 150, 40)\r\n delay.random_float_delay(1.5, 0.2)\r\n\r\ndef clickCancelAll():\r\n # click an empty area to cancel all pending actions\r\n pos = (window_func.NOX_window_rect[0] + 552, window_func.NOX_window_rect[1] + 725)\r\n for _ in range(5):\r\n clickRandomPos((pos[0] + 20, pos[1] + 4), 140, 12)\r\n delay.random_float_delay(0.1, 0.2)\r\n\r\ndef clickClearTeam():\r\n # clear the current team\r\n pos = (window_func.NOX_window_rect[0] + 680, window_func.NOX_window_rect[1] + 595)\r\n try_times = 0\r\n while not img_match.isEmptyTeam():\r\n clickRandomPos((pos[0] + 20, pos[1] + 20), 60, 60)\r\n try_times += 1\r\n if try_times >= 10:\r\n raise img_match.ButtonNotFoundException(\"clear team\")\r\n delay.random_float_delay(0.3, 0.2)\r\n delay.random_float_delay(0.3, 0.2)\r\n\r\ndef clickSelectTeam(team_id:int):\r\n # select team 1-3\r\n pos = img_match.findTeamSelectButton(team_id)\r\n clickRandomPos((pos[0] + 20, pos[1] + 10), 90, 20)\r\n delay.random_float_delay(0.3, 0.2)\r\n\r\ndef clickMyTeamButton():\r\n # \"My Teams\" button\r\n pos = img_match.findMyTeamButton()\r\n\r\n clickRandomPos((pos[0] + 20, pos[1] + 10), 80, 10)\r\n delay.random_float_delay(0.8, 0.2)\r\n\r\ndef clickPreparedPage(page_id:int):\r\n # on the \"My Teams\" screen, select team page 1-5\r\n pos = (window_func.NOX_window_rect[0] + 68 + (page_id - 1) * 191, window_func.NOX_window_rect[1] + 131)\r\n clickRandomPos((pos[0] + 20, pos[1] + 10), 150, 20)\r\n delay.random_float_delay(0.5, 0.3)\r\n\r\ndef clickReverseRank():\r\n # on the \"My Teams\" screen, toggle the sort order\r\n pos = (window_func.NOX_window_rect[0] + 1089, window_func.NOX_window_rect[1] + 137)\r\n 
+{"seq_id":"11781874190","text":"from typing import Dict, List, Optional, Set, Union\n\nimport numpy as np\n\nfrom power_grid_model.core.error_handling import PowerGridBatchError, assert_no_error, find_error\nfrom power_grid_model.core.index_integer import IdNp, IdxNp\nfrom power_grid_model.core.options import Options\nfrom power_grid_model.core.power_grid_core import IDPtr, IdxPtr, ModelPtr\nfrom power_grid_model.core.power_grid_core import power_grid_core as pgc\nfrom power_grid_model.core.power_grid_meta import CDataset, initialize_array, power_grid_meta_data, prepare_cpp_array\nfrom power_grid_model.enum import CalculationMethod, CalculationType\n\n\nclass PowerGridModel:\n \"\"\"\n Main class for Power Grid Model\n \"\"\"\n\n _model_ptr: ModelPtr\n _all_component_count: Optional[Dict[str, int]]\n _batch_error: Optional[PowerGridBatchError]\n\n @property\n def batch_error(self) -> Optional[PowerGridBatchError]:\n \"\"\"\n Get the batch error object, if present\n\n Returns: Batch error object, or None\n\n \"\"\"\n return self._batch_error\n\n @property\n def _model(self):\n if not self._model_ptr:\n raise TypeError(\"You have an empty instance of PowerGridModel!\")\n return self._model_ptr\n\n @property\n def all_component_count(self) -> Dict[str, int]:\n \"\"\"\n Get the count of elements per component type.\n If the count for a component type is zero, it will not be in the returned dictionary.\n Returns:\n a dictionary with\n key: component type name\n value: integer count of elements of this type\n \"\"\"\n if self._all_component_count is None:\n raise TypeError(\"You have an empty instance of PowerGridModel!\")\n return self._all_component_count\n\n def copy(self) -> \"PowerGridModel\":\n \"\"\"\n\n Copy the current model\n\n Returns:\n a copy of PowerGridModel\n \"\"\"\n new_model = PowerGridModel.__new__(PowerGridModel)\n new_model._model_ptr = pgc.copy_model(self._model) # pylint: disable=W0212\n assert_no_error()\n new_model._all_component_count = self._all_component_count # pylint: disable=W0212\n return new_model\n\n def __copy__(self):\n return self.copy()\n\n def __new__(cls, *_args, **_kwargs):\n instance = super().__new__(cls)\n instance._model_ptr = ModelPtr()\n instance._all_component_count = None\n return instance\n\n def __init__(self, input_data: Dict[str, np.ndarray], system_frequency: float = 50.0):\n \"\"\"\n Initialize the model from an input data set.\n\n Args:\n input_data: input data dictionary\n key: component type name\n value: 1D numpy structured array for this component input\n system_frequency: frequency of the power system, default 50 Hz\n \"\"\"\n # destroy old instance\n pgc.destroy_model(self._model_ptr)\n self._all_component_count = None\n # create new\n 
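# prepare_cpp_array converts the numpy input dict into C-compatible dataset pointers for the core.\n 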
prepared_input: CDataset = prepare_cpp_array(\"input\", input_data)\n self._model_ptr = pgc.create_model(\n system_frequency,\n components=prepared_input.components,\n n_components=prepared_input.n_components,\n component_sizes=prepared_input.n_component_elements_per_scenario,\n input_data=prepared_input.data_ptrs_per_component,\n )\n assert_no_error()\n self._all_component_count = {\n k: v.n_elements_per_scenario for k, v in prepared_input.dataset.items() if v.n_elements_per_scenario > 0\n }\n\n def update(self, *, update_data: Dict[str, np.ndarray]):\n \"\"\"\n Update the model with changes.\n Args:\n update_data: update data dictionary\n key: component type name\n value: 1D numpy structured array for this component update\n Returns:\n None\n \"\"\"\n prepared_update: CDataset = prepare_cpp_array(\"update\", update_data)\n pgc.update_model(\n self._model,\n prepared_update.n_components,\n prepared_update.components,\n prepared_update.n_component_elements_per_scenario,\n prepared_update.data_ptrs_per_component,\n )\n assert_no_error()\n\n def get_indexer(self, component_type: str, ids: np.ndarray):\n \"\"\"\n Get array of indexers given array of ids for component type\n\n Args:\n component_type: type of component\n ids: array of ids\n\n Returns:\n array of inderxers, same shape as input array ids\n\n \"\"\"\n ids_c = np.ascontiguousarray(ids, dtype=IdNp).ctypes.data_as(IDPtr)\n indexer = np.empty_like(ids, dtype=IdxNp, order=\"C\")\n indexer_c = indexer.ctypes.data_as(IdxPtr)\n size = ids.size\n # call c function\n pgc.get_indexer(self._model, component_type, size, ids_c, indexer_c)\n assert_no_error()\n return indexer\n\n # pylint: disable=too-many-locals\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-arguments\n def _calculate(\n self,\n calculation_type: CalculationType,\n symmetric: bool,\n error_tolerance: float,\n max_iterations: int,\n calculation_method: Union[CalculationMethod, str],\n update_data: Optional[Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]]],\n threading: int,\n output_component_types: Optional[Union[Set[str], List[str]]],\n continue_on_batch_error: bool,\n ):\n \"\"\"\n Core calculation routine\n\n Args:\n calculation_type:\n symmetric:\n error_tolerance:\n max_iterations:\n calculation_method:\n update_data:\n threading:\n output_component_types:\n continue_on_batch_error:\n\n Returns:\n\n \"\"\"\n if isinstance(calculation_method, str):\n calculation_method = CalculationMethod[calculation_method]\n if symmetric:\n output_type = \"sym_output\"\n else:\n output_type = \"asym_output\"\n self._batch_error = None\n\n # prepare update dataset\n # update data exist for batch calculation\n if update_data is not None:\n batch_calculation = True\n # no update dataset, create one batch with empty set\n else:\n batch_calculation = False\n update_data = {}\n prepared_update: CDataset = prepare_cpp_array(data_type=\"update\", array_dict=update_data)\n batch_size = prepared_update.batch_size\n\n # prepare result dataset\n all_component_count = self.all_component_count\n # for power flow, there is no need for sensor output\n if calculation_type == CalculationType.power_flow:\n all_component_count = {k: v for k, v in all_component_count.items() if \"sensor\" not in k}\n # limit all component count to user specified component types in output\n if output_component_types is None:\n output_component_types = set(all_component_count.keys())\n # raise error is some specified components are unknown\n unknown_components = [x for x in output_component_types if x not 
in power_grid_meta_data[output_type]]\n if unknown_components:\n raise KeyError(f\"You have specified some unknown component types: {unknown_components}\")\n all_component_count = {k: v for k, v in all_component_count.items() if k in output_component_types}\n # create result dataset\n result_dict = {}\n for name, count in all_component_count.items():\n # intialize array\n arr = initialize_array(output_type, name, (batch_size, count), empty=True)\n result_dict[name] = arr\n prepared_result: CDataset = prepare_cpp_array(data_type=output_type, array_dict=result_dict)\n\n # prepare options\n opt: Options = Options()\n opt.calculation_type = calculation_type.value\n opt.calculation_method = calculation_method.value\n opt.symmetric = symmetric\n opt.error_tolerance = error_tolerance\n opt.max_iteration = max_iterations\n opt.threading = threading\n\n # run calculation\n pgc.calculate(\n # model and options\n self._model,\n opt.opt,\n # result dataset\n prepared_result.n_components,\n prepared_result.components,\n prepared_result.data_ptrs_per_component,\n # update dataset\n batch_size,\n prepared_update.n_components,\n prepared_update.components,\n prepared_update.n_component_elements_per_scenario,\n prepared_update.indptrs_per_component,\n prepared_update.data_ptrs_per_component,\n )\n\n # error handling\n if not continue_on_batch_error:\n assert_no_error(batch_size=batch_size)\n else:\n # continue on batch error\n error: Optional[RuntimeError] = find_error(batch_size=batch_size)\n if error is not None:\n if isinstance(error, PowerGridBatchError):\n # continue on batch error\n self._batch_error = error\n else:\n # raise normal error\n raise error\n\n # flatten array for normal calculation\n if not batch_calculation:\n result_dict = {k: v.ravel() for k, v in result_dict.items()}\n\n return result_dict\n\n def calculate_power_flow(\n self,\n *,\n symmetric: bool = True,\n error_tolerance: float = 1e-8,\n max_iterations: int = 20,\n calculation_method: Union[CalculationMethod, str] = CalculationMethod.newton_raphson,\n update_data: Optional[Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]]] = None,\n threading: int = -1,\n output_component_types: Optional[Union[Set[str], List[str]]] = None,\n continue_on_batch_error: bool = False,\n ) -> Dict[str, np.ndarray]:\n \"\"\"\n Calculate power flow once with the current model attributes.\n Or calculate in batch with the given update dataset in batch\n\n Args:\n symmetric:\n True: three-phase symmetric calculation, even for asymmetric loads/generations\n False: three-phase asymmetric calculation\n error_tolerance:\n error tolerance for voltage in p.u., only applicable when iterative=True\n max_iterations:\n maximum number of iterations, only applicable when iterative=True\n calculation_method: an enumeration or string\n newton_raphson: use Newton-Raphson iterative method (default)\n linear: use linear method\n update_data:\n None: calculate power flow once with the current model attributes\n A dictionary for batch calculation with batch update\n key: component type name to be updated in batch\n value:\n a 2D numpy structured array for homogeneous update batch\n Dimension 0: each batch\n Dimension 1: each updated element per batch for this component type\n **or**\n a dictionary containing two keys, for inhomogeneous update batch\n indptr: a 1D integer numpy array with length n_batch + 1\n given batch number k, the update array for this batch is\n data[indptr[k]:indptr[k + 1]]\n This is the concept of compressed sparse structure\n 
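For illustration (numbers invented): with 3 batches and indptr = [0, 2, 2, 5],\n batch 0 updates data[0:2], batch 1 updates nothing, and batch 2 updates data[2:5].\n 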
https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html\n data: 1D numpy structured array in flat\n threading:\n only applicable for batch calculation\n < 0 sequential\n = 0 parallel, use number of hardware threads\n > 0 specify number of parallel threads\n output_component_types: list or set of component types you want to be present in the output dict.\n By default all component types will be in the output\n continue_on_batch_error: if the program continues (instead of throwing error) if some scenarios fails\n\n Returns:\n dictionary of results of all components\n key: component type name to be updated in batch\n value:\n for single calculation: 1D numpy structured array for the results of this component type\n for batch calculation: 2D numpy structured array for the results of this component type\n Dimension 0: each batch\n Dimension 1: the result of each element for this component type\n Error handling:\n in case an error in the core occurs, an exception will be thrown\n \"\"\"\n return self._calculate(\n CalculationType.power_flow,\n symmetric=symmetric,\n error_tolerance=error_tolerance,\n max_iterations=max_iterations,\n calculation_method=calculation_method,\n update_data=update_data,\n threading=threading,\n output_component_types=output_component_types,\n continue_on_batch_error=continue_on_batch_error,\n )\n\n def calculate_state_estimation(\n self,\n *,\n symmetric: bool = True,\n error_tolerance: float = 1e-8,\n max_iterations: int = 20,\n calculation_method: Union[CalculationMethod, str] = CalculationMethod.iterative_linear,\n update_data: Optional[Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]]] = None,\n threading: int = -1,\n output_component_types: Optional[Union[Set[str], List[str]]] = None,\n continue_on_batch_error: bool = False,\n ) -> Dict[str, np.ndarray]:\n \"\"\"\n Calculate state estimation once with the current model attributes.\n Or calculate in batch with the given update dataset in batch\n\n Args:\n symmetric:\n True: three-phase symmetric calculation, even for asymmetric loads/generations\n False: three-phase asymmetric calculation\n error_tolerance:\n error tolerance for voltage in p.u., only applicable when iterative=True\n max_iterations:\n maximum number of iterations, only applicable when iterative=True\n calculation_method: an enumeration\n iterative_linear: use iterative linear method\n update_data:\n None: calculate state estimation once with the current model attributes\n A dictionary for batch calculation with batch update\n key: component type name to be updated in batch\n value:\n a 2D numpy structured array for homogeneous update batch\n Dimension 0: each batch\n Dimension 1: each updated element per batch for this component type\n **or**\n a dictionary containing two keys, for inhomogeneous update batch\n indptr: a 1D integer numpy array with length n_batch + 1\n given batch number k, the update array for this batch is\n data[indptr[k]:indptr[k + 1]]\n This is the concept of compressed sparse structure\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html\n data: 1D numpy structured array in flat\n threading:\n only applicable for batch calculation\n < 0 sequential\n = 0 parallel, use number of hardware threads\n > 0 specify number of parallel threads\n output_component_types: list or set of component types you want to be present in the output dict.\n By default all component types will be in the output\n continue_on_batch_error: if the program continues (instead of throwing error) if 
some scenarios fails\n\n\n Returns:\n dictionary of results of all components\n key: component type name to be updated in batch\n value:\n for single calculation: 1D numpy structured array for the results of this component type\n for batch calculation: 2D numpy structured array for the results of this component type\n Dimension 0: each batch\n Dimension 1: the result of each element for this component type\n Error handling:\n in case an error in the core occurs, an exception will be thrown\n \"\"\"\n return self._calculate(\n CalculationType.state_estimation,\n symmetric=symmetric,\n error_tolerance=error_tolerance,\n max_iterations=max_iterations,\n calculation_method=calculation_method,\n update_data=update_data,\n threading=threading,\n output_component_types=output_component_types,\n continue_on_batch_error=continue_on_batch_error,\n )\n\n def __del__(self):\n pgc.destroy_model(self._model_ptr)\n","repo_name":"Chillthrower/power-grid-model","sub_path":"src/power_grid_model/core/power_grid_model.py","file_name":"power_grid_model.py","file_ext":"py","file_size_in_byte":17569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"4929166603","text":"\"\"\"\nDefine the color tables.\n\"\"\"\nfrom PyQt5.QtGui import qRgb\n\nALIVE_COLOR = qRgb(255, 255, 255)\nDEAD_COLOR = qRgb(0, 0, 0)\n\nCOLOR_TABLE = [DEAD_COLOR] + [qRgb(0, 255, 255 - i * 2) for i in range(128)] + \\\n [qRgb(i*2, 255-i*2, 0) for i in range(127)]\n# Alive cell: white - Dead cell: black\nBW_COLOR_TABLE = [DEAD_COLOR] + [ALIVE_COLOR for i in range(256)]\n","repo_name":"JacopoBartoli/game-of-life","sub_path":"utils/colors.py","file_name":"colors.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"40306930175","text":"from django.shortcuts import render,redirect\nfrom django.views import View\nfrom django.http import JsonResponse\nfrom . 
models import *\nfrom django.core.mail import EmailMessage\nfrom django.contrib import auth\nfrom django.contrib import messages\nimport json\n# Create your views here.\nclass DashboardView(View):\n def get(self,request):\n projects = Project.objects.all()\n return render(request,'home/index.html',{\"projects\":projects})\nclass LoginView(View):\n def get(self,request):\n return render(request,'authentication/login.html')\n def post(self,request):\n email = request.POST['email']\n password = request.POST['password']\n if email and password:\n user = auth.authenticate(email=email,password=password)\n if user:\n auth.login(request,user)\n return redirect('/')\n else:\n messages.error(request,'Invalid login')\n return render(request,'authentication/login.html')\n messages.error(request,'Invalid login credentials')\n return render(request,'authentication/login.html')\nclass LogoutView(View):\n def post(self,request):\n auth.logout(request)\n messages.success(request,\"You have successfully logged out\")\n return redirect('/')\n####REGISTER VIEW\nclass RegisterView(View):\n def get(self,request):\n return render(request,'authentication/register.html')\n def post(self,request):\n username = request.POST['username']\n email = request.POST['email']\n full_name = request.POST['full_name']\n password = request.POST['password']\n bio = request.POST['bio']\n if not Profile.objects.filter(username=username).exists():\n if not Profile.objects.filter(email=email).exists():\n profile = Profile.objects.create_user(username=username,email=email,password=password,full_name=full_name,bio=bio)\n profile.save()\n email = EmailMessage(\n 'Awards Account',\n 'Awards account created successfully',\n 'liznabuuso@gmail.com',\n [email]\n )\n email.send(fail_silently=True)\n return JsonResponse({\"message\":\"Account created successfully\",\"status\":201},status=201)\n else:\n return JsonResponse({\"error\":\"Email already taken\",\"status\":400},status=400)\n else:\n return JsonResponse({\"error\":\"Username already taken\",\"status\":400},status=400)\n##PROJECTS\nclass ProjectsView(View):\n def post(self,request):\n img = request.FILES.get(\"image\")\n name = request.POST['name']\n description = request.POST['description']\n profile = request.POST['profile']\n link = request.POST['link']\n project = Project()\n project.image = img\n project.name = name\n project.description = description\n p = Profile.objects.get(pk=profile)\n project.profile = p\n project.link = link\n project.save_image()\n return JsonResponse({\"success\":\"Image uploaded successfully\",\"status\":201},status=201)\nclass RatingView(View):\n def post(self,request):\n design = request.POST['design']\n usability = request.POST['usability']\n content = request.POST['content']\n project = request.POST['project']\n profile = request.POST['profile']\n rating = Rating(design_rating=design,usability_rating=usability,content_rating=content,project_id=project,profile_id=profile)\n rating.save()\n return JsonResponse({\"message\":\"Rated successfully\",\"status\":201},status=201)\n##USERS\nclass ProfilesView(View):\n def get(self,request):\n projects = Project.objects.filter(profile=request.user)\n return render(request,'home/profile.html',{\"projects\":projects})\n###SEARCH PROJECTS
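\n# The search view below ORs four QuerySets together. A commonly used equivalent is a\n# single filter built from Q objects (a sketch, not this project's code):\n#   from django.db.models import Q\n#   Project.objects.filter(Q(name__icontains=s) | Q(profile__full_name__icontains=s))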
\nclass SearchProject(View):\n def post(self,request):\n search_str = json.loads(request.body).get('searchText')\n projects = Project.objects.filter(name__icontains=search_str) | Project.objects.filter(design_rating__icontains=search_str) | Project.objects.filter(usability_rating__icontains=search_str) | Project.objects.filter(profile__full_name__icontains=search_str)\n data = projects.values()\n return JsonResponse(list(data),safe=False)\n####PROFILE API\nclass ProfileAPIView(View):\n def get(self,request):\n all_profiles = Profile.objects.all()\n profiles = [{\"name\":profile.full_name,\"email\":profile.email,\"username\":profile.username,\"bio\":profile.bio} for profile in all_profiles]\n return JsonResponse(profiles,safe=False)\n####PROJECTS API\nclass ProjectsAPIView(View):\n def get(self,request):\n projects = Project.objects.all()\n project_list = [{\"image\":project.image.url,\"name\":project.name,\"description\":project.description,\"link\":project.link,\"created_date\":project.created_date,\"updated_date\":project.updated_date,\"design_rating\":project.design_rating,\"content_rating\":project.content_rating,\"total_raters\":project.total_raters} for project in projects]\n return JsonResponse(project_list,safe=False)","repo_name":"Nabuuso/Awards","sub_path":"awards/projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20179403292","text":"from flask import Flask, request, jsonify, make_response, send_file\nimport json\nimport random\nimport pickle\nimport config as CFG\nimport sklearn\nimport numpy as np\nfrom lime.lime_tabular import LimeTabularExplainer\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndef load_model():\n return pickle.load(open(CFG.MODEL_FILE, 'rb'))\n\n\ndef load_age_mons_preprocessing():\n return pickle.load(open(CFG.AGE_MONS_PREPROCESSING_FILE, 'rb'))\n\n\napp = Flask(__name__)\nmodel = load_model()\nage_mons_preprocessing = load_age_mons_preprocessing()\ntraining = pd.read_csv(CFG.TRAINING)\n\n\ndef convert_json(my_dict):\n new_json = {}\n for num, i in enumerate(my_dict):\n if num <= 8:\n if my_dict[i] == 2 or my_dict[i] == 3 or my_dict[i] == 4:\n new_json[i] = 1\n else:\n new_json[i] = 0\n elif num == 9:\n if my_dict[i] == 0 or my_dict[i] == 1 or my_dict[i] == 2:\n new_json[i] = 1\n else:\n new_json[i] = 0\n elif num == 11:\n if my_dict[i] == 2:\n new_json[i] = random.randint(0, 1)\n else:\n new_json[i] = my_dict[i]\n else:\n new_json[i] = my_dict[i]\n\n return json.dumps(new_json)\n\n\n@app.route(\"/api/predict\", methods=[\"POST\"])\ndef predict():\n try:\n my_json = request.get_json()\n encoded_dict = convert_json(my_json)\n dictionary = eval(encoded_dict)\n\n normalize_age_mons = age_mons_preprocessing.transform([[dictionary['age_month']]])[0, 0]\n\n dictionary['age_month'] = normalize_age_mons\n my_dict = np.array([list(dictionary.values())])\n\n prediction_probability = model.predict_proba(my_dict)[0]\n\n if prediction_probability[1] > 0.5:\n prediction_probability = prediction_probability[1]\n predict_spectrum = 1\n else:\n prediction_probability = prediction_probability[0]\n predict_spectrum = 0\n\n response = {\"prediction\": predict_spectrum, \"prediction_probability\": prediction_probability}\n\n return make_response(jsonify(response), 200)\n\n except ValueError:\n return 'Bad Request', 400\n\n\n@app.route(\"/api/explain\", methods=[\"POST\"])\ndef explain():\n try:\n my_json = request.get_json()\n encoded_dict = 
convert_json(my_json)\n dictionary = eval(encoded_dict)\n\n normalize_age_mons = age_mons_preprocessing.transform([[dictionary['age_month']]])[0, 0]\n\n dictionary['age_month'] = normalize_age_mons\n pred = np.array([x[1] for x in dictionary.items()])\n\n exp = LimeTabularExplainer(training.values, feature_names=training.columns, discretize_continuous=True)\n\n fig = exp.explain_instance(pred, model.predict_proba).as_pyplot_figure()\n fig.figsize = (30, 10)\n plt.tight_layout()\n plt.savefig('explain.png')\n\n return send_file('explain.png', mimetype='image/png', as_attachment=True)\n\n except ValueError:\n return 'Bad Request', 400\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"regCode/spectrum-screen","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"2441866099","text":"'''\nDescription\n\nInput:\n\tA table that has columns:\n\t\t1. genepop file (curretnly with a signel pop only)\n\t\t2. individual name in genepop file\n\t\t3. replicate number that includes the individual, at the lowest proportion subsampled\n\t\t4. if more proportions, then as above, at the next lowest proportion subsampled\n\t\t.\n\t\t.\n\t\t.\n\t\tn. if more proportions, then as above, at highest proportion subsampled\n\t\n\tA proportion between 0 and 1.0\n\tA preplicate number\n\nOutput:\n\t1 To standard out, individual names, one to a line, that are included in the \n\treplicate at that proportion.\n\n'''\nfrom __future__ import print_function\nfrom builtins import range\n__filename__ = \"indiv.from.perc.and.rep.num.py\"\n__date__ = \"20160524\"\n__author__ = \"Ted Cosart\"\n\n\nimport sys\nimport os\n\nDELIM_TABLE=\"\\t\"\nDELIM_REP_NUMS=\",\"\nINDEX_TABLE_INDIV_NAME=1\nINDEX_TABLE_GP_FILE=0\n\n#indicates how much output info to produce:\nINDIV_ONLY=0\nINDIV_AND_ALLELES=1\n\ndef do_usage_check( ls_this_argv, \n\t\tls_required_arg_descriptions, \n\t\tls_optional_arg_descriptions = [],\n\t\tb_multi_line_msg=False,\n\t\tb_unlimited_final_args=False,\n\t\ts_note=None):\n\t'''\n\targ 1 expects sys.argv, or a copy\n\targ 2 expects a list strings, in order, required args\n\targ 3 expects a list of strings, in order, any optional args\n\t'''\n\ti_num_args_required = len( ls_required_arg_descriptions )\n\ti_num_args_optional = len( ls_optional_arg_descriptions )\n\ti_max_poss_num_args = i_num_args_required + i_num_args_optional \n\ti_num_args_passed = len( ls_this_argv ) - 1 \n\ts_usage_string = \"\"\n\ts_arg_delimit=\"\"\n\tif b_multi_line_msg == True:\n\t\ts_arg_delimit=OUTPUT_ENDLINE\n\t#end if we put each arg descript on a \n\t#separate line of output\n\n\tif i_num_args_passed < i_num_args_required \\\n\t\t\tor (i_num_args_passed > i_max_poss_num_args and b_unlimited_final_args == False ): \n\t\n\t\t\n\t\ts_scriptname = os.path.basename( ls_this_argv[ 0 ] )\t\n\t\ts_usage_string = \"usage: \" + s_scriptname \n\n\t\tif b_multi_line_msg:\n\t\t\ts_usage_string=\"usage: \" + s_scriptname \\\n\t\t\t\t\t+ OUTPUT_ENDLINE + \"args:\" + OUTPUT_ENDLINE\n\t\telse:\n\t\t\ts_usage_string = \"usage: \" + s_scriptname \n\t\t#end if multi line else not\n\n\t\tfor s_arg in ls_required_arg_descriptions:\n\n\t\t\ts_usage_string += \" <\" \\\n\t\t\t\t+ s_arg + \">\" \\\n\t\t\t\t+ s_arg_delimit\n\t\t#end for each requried arg\n\n\t\tfor s_arg in ls_optional_arg_descriptions:\n\n\t\t\ts_usage_string += \" <(optional) \" \\\n\t\t\t\t+ s_arg + \">\" \\\n\t\t\t\t+ 
s_arg_delimit\n\t\t#end for each required arg\n\n\t#end if number of args is out of range\n\n\tif s_usage_string != \"\" and s_note is not None:\n\t\ts_usage_string=s_usage_string + OUTPUT_ENDLINE + s_note\n\t#end if note \n\n\treturn s_usage_string \n#def do_usage_check\n\ndef get_proportion_col_number( s_first_line_in_table, s_proportion ):\n\t'''\n\treturns the zero-based index of the column number\n\tthat gives replicate number lists asssociated with\n\tthe s_proportion\n\n\treturns None if the s_proportion has no match in the field names\n\t'''\n\n\tls_first_line_in_table=s_first_line_in_table.strip().split( DELIM_TABLE )\n\ti_num_fields=len( ls_first_line_in_table )\n\ti_col_num=None\n\n\t#first 2 cols are filename and indiv name, so we search idx 2..end:\n\tfor idx in range( 2, i_num_fields ):\n\n\t\tif ls_first_line_in_table[ idx ] == s_proportion:\n\t\t\ti_col_num=idx\n\t\t\tbreak\n\t\t#end if match\n\t#end\n\n\tif i_col_num is None:\n\n\t\ts_msg=\"Can't match given proportion \" + s_proportion + \".\" \\\n\t\t\t\t+ \"Here are the proportions: \" \\\n\t\t\t\t+ \", \".join( ls_first_line_in_table[ 2: ] ) \\\n\t\t\t\t+ \" Please match number of digits exactly.\"\n\t\t\t\n\t\tsys.stderr.write( s_msg + \"\\n\" )\n\t\tsys.exit()\n\t#end if no match\n\n\treturn i_col_num \n\n#end get_proportion_col_number\n\ndef get_reps( s_line, i_proportion_col_index ):\n\n\tls_reps = None\n\t\n\tls_vals=s_line.strip().split( DELIM_TABLE )\n\t\n\ts_reps = ls_vals[ i_proportion_col_index ]\n\n\tls_reps=s_reps.split( DELIM_REP_NUMS )\n\n\treturn ls_reps\n#end get_reps\n\ndef get_non_rep_fields( s_line ):\n\tls_vals=s_line.strip().split( DELIM_TABLE )\n\treturn { \"gpfile\" : ls_vals[ INDEX_TABLE_GP_FILE ],\n\t\t\t\"indiv\" : ls_vals[ INDEX_TABLE_INDIV_NAME ] }\n#end get_individual_name\n\n\ndef get_dict_alleles_by_indiv_from_genepop_file( s_gp_file ):\n\n\tds_alleles_by_indiv={}\n\n\to_gp_file=open( s_gp_file, 'r' )\n\n\t#iterate past the header and loci entries:\n\tfor s_line in o_gp_file:\n\t\tif s_line.lower().startswith( \"pop\" ):\n\t\t\tbreak\n\t\t#end if first pop line reached, break\n\t#end for each line\n\n\t#next line in iteration should be first indiv\n\t#first pop, but for simplicity, we\n\t#always check for a \"pop\" line:\n\tfor s_line in o_gp_file:\n\t\ts_line_stripped=s_line.strip()\n\t\tif not( s_line_stripped.lower() == \"pop\" ):\n\t\t\tif \",\" not in s_line:\n\t\t\t\traise Exception( \"pop entry in \" + s_gp_file \\\n\t\t\t\t\t\t+ \" found no comma seperator. 
Line: \" \\\n\t\t\t\t\t\t+ s_line )\n\t\t\t#end if no comma\n\t\t\tls_indiv_and_alleles=s_line.strip().split( \",\" )\n\t\t\ts_indiv=ls_indiv_and_alleles[ 0 ].strip()\n\t\t\ts_alleles=ls_indiv_and_alleles[ 1 ].strip()\n\n\t\t\t#we replace the spaces between alleles with tabs\n\t\t\t#so that each (bi)allele can have its own col\n\t\t\t#if loaded in R or spreadsheet\n\t\t\tds_alleles_by_indiv[ s_indiv ] = s_alleles.replace( \" \", \"\\t\" )\n\t\t#end if line not \"pop\" then\n\t#end for each file line\n\t\n\treturn ds_alleles_by_indiv\n\n#end get_dict_alleles_by_indiv_from_genepop_file\n\ndef get_individuals( s_gp_file, s_table, s_proportion, s_replicate, i_output_type ):\n\n\to_table=open( s_table,'r' )\n\n\ti_proportion_col_index=None\n\t\n\ti_line_count = 0\n\n\tds_alleles_by_indiv=None\n\n\tif i_output_type==INDIV_AND_ALLELES:\n\t\tds_alleles_by_indiv=get_dict_alleles_by_indiv_from_genepop_file( s_gp_file )\n\t#end if output is indiv and alleles\t\n\n\tls_individuals=[]\n\n\tfor s_line in o_table:\n\n\t\ti_line_count+=1\n\n\t\tif i_line_count == 1:\n\n\t\t\ti_proportion_col_index = get_proportion_col_number( s_line, s_proportion )\n\t\telse:\n\t\t\tds_non_rep_fields=get_non_rep_fields( s_line )\n\n\t\t\t#we want just the file name, so that it will\n\t\t\t#match the entry in the table:\n\t\t\ts_gp_file_no_path=os.path.basename( s_gp_file )\n\t\t\n\t\t\tif ds_non_rep_fields[ \"gpfile\" ] == s_gp_file_no_path:\n\t\t\t\tls_reps_this_indiv=get_reps( s_line, i_proportion_col_index )\n\t\t\t\tif s_replicate in ls_reps_this_indiv:\n\t\t\t\t\ts_individual=ds_non_rep_fields[ \"indiv\" ] \n\t\t\t\t\tls_individuals.append( s_individual )\n\t\t\t#end if rep is in list\n\t\t#end if first line else not\t\n\t#end for each line in file\n\n\to_table.close()\n\n\tfor s_individual in ls_individuals:\n\n\t\tif i_output_type==INDIV_AND_ALLELES:\n\t\t\tsys.stdout.write( s_individual + \"\\t\" \\\n\t\t\t\t\t+ ds_alleles_by_indiv[ s_individual ] + \"\\n\" )\n\t\telif i_output_type==INDIV_ONLY:\n\t\t\tsys.stdout.write( s_individual + \"\\n\" )\n\t\telse:\n\t\t\traise Exception( \"in def get_individuals, unknown value for output type: \" \\\n\t\t\t\t\t+ str( i_output_type ) )\n\t#end for each individual\n\n\treturn\n#end get_individuals\n\nif __name__ == \"__main__\":\n\n\tdi_output_inclusions={ \"indiv\":INDIV_ONLY, \"alleles\":INDIV_AND_ALLELES }\n\n\tls_args=[ \"genepop file\", \"table file\", \n\t\t\t\"proportion\", \"replicate number\", \n\t\t\t\"\\\"indiv\\\" or \\\"alleles\\\" -- \" \\\n\t\t\t+ \"output will list individuals only or individuals and their alleles (tab-delimited)\" ] \n\t\n\ts_usage=do_usage_check( sys.argv, ls_args )\n\n\tif s_usage:\n\t\tprint( s_usage )\n\t\tsys.exit()\n\t#end if usage\n\n\ts_gp_file=sys.argv[1]\n\ts_table=sys.argv[2]\n\ts_proportion=sys.argv[3]\n\ts_replicate_num=sys.argv[4]\n\ts_output_request=sys.argv[5]\n\t\n\tif s_output_request not in di_output_inclusions:\n\t\t\traise Exception( \"5th argument must one of \" \\\n\t\t\t\t\t+ \" or \".join( [ \"\\\"\" + skey + \"\\\"\" for skey \\\n\t\t\t\t\tin list(di_output_inclusions.keys()) ] ) )\n\telse:\n\t\ti_output_type=di_output_inclusions[ s_output_request ]\n\t#end if 5th arg invalid else get vals\n\n\tget_individuals( s_gp_file, s_table, s_proportion, s_replicate_num, i_output_type )\n\n#end if 
main\n\n","repo_name":"popgengui/agestrucnb","sub_path":"supplementary_scripts/indiv.from.perc.and.rep.num.py","file_name":"indiv.from.perc.and.rep.num.py","file_ext":"py","file_size_in_byte":7660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"28561529452","text":"import re\nimport wikipedia\n\ndef get_definition(subject : str) -> str:\n regex = r\"(can you)?\\s*(please)?\\s*(define|tell me)\\s*(what|who)?\\s*(is)?\\s*|\\?\"\n clean_subject = re.sub(regex,\"\",subject.lower())\n print(clean_subject)\n try:\n wiki_response = wikipedia.summary(clean_subject, sentences=3)\n response = re.sub(r\"\\([^)]*\\)|'\",\"\",wiki_response)\n response = re.sub(r\"\\s+\",\" \", response)\n except wikipedia.exceptions.DisambiguationError:\n response = \"This concept is ambiguous, please be more specific\"\n except wikipedia.exceptions.PageError:\n response = \"Sorry, I currently do not know the answer to this.\"\n return response\n\nif __name__ == \"__main__\":\n print(get_definition(\"can you define artificial intelligence (AI)?\"))\n","repo_name":"jose-alvarado-guzman/virtual_personal_assistant","sub_path":"VirtualAssistant/define_subject.py","file_name":"define_subject.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"10679498824","text":"from collections import deque\nfrom .control import MouseControl, KeyControl\n\nclass Module:\n\n def __init__(self, name, detector):\n self.name = name\n self.detector = detector\n self.mapping = {}\n self.actions = deque()\n self.dfs = deque()\n self.max_data_len = 60\n self.max_action_len = 1\n \n def add_mapping(self, control, actions):\n if isinstance(actions, str):\n actions = [actions]\n\n if len(actions) > self.max_action_len:\n self.max_action_len = len(actions)\n\n self.mapping[str(actions)] = control\n\n def add_transition(self, control, actions):\n self.add_mapping(control, actions)\n\n def add_mouse_mapping(self, control, action, **params):\n self.add_mapping(MouseControl(control, **params), action)\n\n def add_key_mapping(self, control, action):\n self.add_mapping(KeyControl(control), action)\n\n # actions to control\n def __call__(self, df):\n\n action = self.detector(df)\n self.update_actions(action)\n self.update_dfs(df)\n\n if len(self.actions) < self.max_action_len:\n return False\n\n for action_len in range(self.max_action_len, 0, -1):\n actions = [self.actions[i] for i in range(len(self.actions)-action_len,\n len(self.actions))]\n actions_str = str(actions)\n if actions_str in self.mapping:\n control = self.mapping[actions_str] # control can be a module to transit between modules\n \n if hasattr(control, \"method_name\") and \"move_diff\" in control.method_name:\n if control.method_name == \"right_move_diff\":\n fix_points = [\"r_w\"]\n if control.method_name == \"left_move_diff\":\n fix_points = [\"l_w\"]\n\n fix_points_cols_x = [c + \"_x\" for c in fix_points]\n fix_points_cols_y = [c + \"_y\" for c in fix_points]\n control.set_params(df_data_1_x = self.dfs[-2][fix_points_cols_x], \n df_data_2_x = self.dfs[-1][fix_points_cols_x],\n df_data_1_y = self.dfs[-2][fix_points_cols_y],\n df_data_2_y = self.dfs[-1][fix_points_cols_y])\n\n return control\n\n return False\n\n def update_actions(self, action):\n self.actions.append(action)\n if len(self.actions) > self.max_data_len:\n self.actions.popleft()\n\n def update_dfs(self, df):\n self.dfs.append(df)\n if len(self.dfs) > self.max_data_len:\n self.dfs.popleft()\n\n def reset(self):\n self.actions = deque()\n self.dfs = deque()\n ","repo_name":"dabit-lucas/hac","sub_path":"pyhac/module/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"91"}
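A note on the ring buffers in the Module class above: collections.deque can enforce the size cap itself via maxlen, which would make the manual length checks and popleft() calls unnecessary. A minimal sketch (the 60-entry window mirrors max_data_len above):

from collections import deque

history = deque(maxlen=60)  # oldest entries are dropped automatically
for frame in range(100):
    history.append(frame)
assert len(history) == 60 and history[0] == 40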
+{"seq_id":"4393808026","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Question',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('question_number', models.SmallIntegerField()),\n ('a_points', models.SmallIntegerField(null=True)),\n ('b_points', models.SmallIntegerField(null=True)),\n ('c_points', models.SmallIntegerField(null=True)),\n ('d_points', models.SmallIntegerField(null=True)),\n ('e_points', models.SmallIntegerField(null=True)),\n ('f_points', models.SmallIntegerField(null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Results',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('a', models.BooleanField()),\n ('b', models.BooleanField()),\n ('c', models.BooleanField()),\n ('d', models.BooleanField()),\n ('e', models.BooleanField()),\n ('f', models.BooleanField()),\n ('question_id', models.ForeignKey(to='testownik.Question')),\n ],\n ),\n migrations.CreateModel(\n name='Sheet',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('sheet_number', models.SmallIntegerField()),\n ('points', models.SmallIntegerField(null=True)),\n ],\n options={\n 'verbose_name': 'Arkusz',\n 'verbose_name_plural': 'Arkusze',\n },\n ),\n migrations.CreateModel(\n name='SheetQuestions',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('order_number', models.IntegerField()),\n ('answer_order', models.CharField(max_length=6)),\n ('question_id', models.ForeignKey(to='testownik.Question')),\n ('sheet_id', models.ForeignKey(to='testownik.Sheet')),\n ],\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=30)),\n ('last_name', models.CharField(max_length=30)),\n ('index_number', models.IntegerField()),\n ],\n options={\n 'verbose_name_plural': 'Studenci',\n },\n ),\n migrations.CreateModel(\n name='Test',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ('start_time', models.DateTimeField()),\n ('end_time', models.DateTimeField()),\n ('time', models.SmallIntegerField(null=True)),\n ],\n options={\n 'verbose_name': 'Test',\n 'verbose_name_plural': 'Testy',\n },\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('is_teacher', models.BooleanField(default=False)),\n ('is_supervisor', models.BooleanField(default=False)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Profil u\\u017cytkownika',\n 'verbose_name_plural': 'Profile u\\u017cytkownik\\xf3w',\n },\n ),\n migrations.AddField(\n model_name='test',\n name='author_id',\n field=models.ForeignKey(to='testownik.UserProfile'),\n ),\n migrations.AddField(\n model_name='sheet',\n name='student_id',\n field=models.ForeignKey(to='testownik.Student'),\n ),\n migrations.AddField(\n model_name='sheet',\n name='test_id',\n field=models.ForeignKey(to='testownik.Sheet'),\n ),\n migrations.AddField(\n model_name='results',\n name='sheet_id',\n field=models.ForeignKey(to='testownik.Sheet'),\n ),\n ]\n","repo_name":"piotut/TestSystem","sub_path":"testownik/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
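Once the testownik migration above has been applied, the schema can be exercised through the ORM. A hypothetical snippet (model names and fields come from the migration; the import path and data values are assumptions):

from testownik.models import Question, Student

# Student has no foreign keys, so it can be created directly.
student = Student.objects.create(first_name='Ada', last_name='Lovelace', index_number=123456)
# The per-answer point columns are all null=True, so most may be omitted.
question = Question.objects.create(question_number=1, a_points=2)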
+{"seq_id":"27663063998","text":"# Range sum query 5 (2D prefix-sum table)\nimport sys\nr = sys.stdin.readline\n\nnum, add_num = map(int, r().split())\nnumbers = [[0 for _ in range(num+1)]]\nadd_sum = [[0 for _ in range(num+1)] for _ in range(num+1)]\nanswers = []\n\nfor _ in range(num):\n numbers.append([0] + list(map(int, r().split())))\n\nfor i in range(1, num+1):\n for j in range(1, num+1):\n add_sum[i][j] = add_sum[i-1][j] + add_sum[i][j-1] - add_sum[i-1][j-1] + numbers[i][j]\n\nfor _ in range(add_num):\n start, end, start2, end2 = map(int, r().split())\n answers.append(add_sum[start2][end2] - add_sum[start-1][end2] - add_sum[start2][end-1] + add_sum[start-1][end-1])\n\nfor answer in answers:\n print(answer)","repo_name":"SunghyunChoi/Algorithm","sub_path":"정글/WEEK09/11660.py","file_name":"11660.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"41719182787","text":"import time\r\nfrom models.mensagem import Mensagem\r\nfrom models.correntista import Correntista as CorrentistaModel\r\n\r\n\r\nclass Correntista:\r\n def __init__(self, registros_auditoria):\r\n self.__mensagem = Mensagem()\r\n self.__registros_auditoria = registros_auditoria\r\n self.__nome = None\r\n self.__cpf = None\r\n self.__saldo = None\r\n\r\n def cadastrar(self):\r\n self.__mensagem.mensagens(\r\n titulo=\"Cadastrar Correntista\"\r\n )\r\n self.__mensagem.print()\r\n\r\n try:\r\n if not self.__nome:\r\n self.__nome = str(input(\"Nome: \"))\r\n\r\n if not self.__cpf:\r\n self.__cpf = str(input(\"Cpf: \"))\r\n\r\n if not self.__saldo:\r\n self.__saldo = float(input(\"Depósito inicial: R$ \"))\r\n\r\n correntista = CorrentistaModel(\r\n self.__registros_auditoria,\r\n self.__nome,\r\n self.__cpf,\r\n self.__saldo\r\n )\r\n\r\n self.__mensagem.mensagens(\r\n sucesso=\"Correntista {0} cadastrado com sucesso\".format(\r\n self.__nome\r\n ),\r\n info=\"Você será redirecionado para tela inicial. 
Aguarde...\"\r\n )\r\n self.__mensagem.print()\r\n time.sleep(2)\r\n return correntista\r\n except ValueError:\r\n self.__mensagem.mensagens(\r\n alerta=\"Preencha novamente o dado corretamente!\"\r\n )\r\n return self.cadastrar()\r\n except Exception:\r\n self.__mensagem.mensagens(\r\n alerta=\"Tente novamente em alguns instantes!\"\r\n )\r\n return self.cadastrar()\r\n\r\n def selecionar(self, correntistas):\r\n self.__mensagem.mensagens(\r\n titulo=\"Selecionar Correntista\"\r\n )\r\n self.__mensagem.print()\r\n\r\n try:\r\n for (id, correntista) in enumerate(correntistas):\r\n print(\"{0} - {1}\".format(id, correntista.nome()))\r\n\r\n opcao = int(input())\r\n return correntistas[opcao]\r\n except (ValueError, IndexError):\r\n self.__mensagem.mensagens(\r\n alerta=\"Erro ao selecionar o correntista!\"\r\n )\r\n return self.selecionar(correntistas)\r\n","repo_name":"mayronceccon/olist-python-labs-project","sub_path":"views/correntista.py","file_name":"correntista.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"5647590204","text":"#!/usr/bin/env python3\n\n\n\"\"\"\nGiven a project and sample IDs, copies cram files for\neach sample listed into the project's release bucket.\n\"\"\"\n\nimport logging\nimport sys\nimport subprocess\nimport time\nimport click\n\n# pylint: disable=E0401,E0611\nfrom cpg_utils.config import get_config\nfrom sample_metadata.apis import AnalysisApi\nfrom sample_metadata.models import AnalysisType\n\n\ndef check_paths_exist(paths: list[str]):\n \"\"\"\n Checks a list of gs:// paths to see if they point to an existing blob\n Logs the invalid paths if any are found\n \"\"\"\n invalid_paths = False\n for path in paths:\n # gsutil ls returns '\\n' if path exists\n result = subprocess.run(\n ['gsutil', 'ls', path], check=True, capture_output=True, text=True\n ).stdout.strip('\\n')\n if result == path:\n continue\n # If path does not exist, log the path and set invalid_paths to True\n logging.info(f'Invalid path: {path}')\n invalid_paths = True\n\n if invalid_paths:\n return False\n return True\n\n\ndef copy_to_release(project: str, billing_project: str, paths: list[str]):\n \"\"\"\n Copy many files from main bucket paths to the release bucket with todays date as directory\n \"\"\"\n today = time.strftime('%Y-%m-%d')\n release_path = f'gs://cpg-{project}-release/{today}/'\n\n subprocess.run(\n [\n 'gcloud',\n 'storage',\n '--billing-project',\n billing_project,\n 'cp',\n *paths,\n release_path,\n ],\n check=True,\n )\n logging.info(f'Copied {paths} into {release_path}')\n\n\n@click.command()\n@click.option('--project', '-p', help='Metamist name of the project', default='')\n@click.option('--billing-project', '-b', help='The GCP billing project to use')\n@click.argument('samples', nargs=-1)\ndef main(project: str, billing_project: str, samples):\n \"\"\"\n\n Parameters\n ----------\n project : a metamist project name, optional as it can be pulled from the AR config\n samples : a list of sample ids to copy to the release bucket\n \"\"\"\n if not project:\n config = get_config()\n project = config['workflow']['dataset']\n\n if not billing_project:\n billing_project = project\n\n sample_ids = list(samples)\n\n # Retrieve latest crams for selected samples\n latest_crams = AnalysisApi().get_latest_analysis_for_samples_and_type(\n AnalysisType('cram'), project, request_body=sample_ids\n )\n\n # Get all paths of files to be copied to release\n cram_paths = []\n for cram 
in latest_crams:\n cram_paths.append(cram['output'])\n cram_paths.append(cram['output'] + '.crai')\n\n # Check if all paths are valid and execute the copy commands if they are\n if check_paths_exist(cram_paths):\n copy_to_release(project, billing_project, cram_paths)\n\n\nif __name__ == '__main__':\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(levelname)s %(module)s:%(lineno)d - %(message)s',\n datefmt='%Y-%M-%d %H:%M:%S',\n stream=sys.stderr,\n )\n\n main() # pylint: disable=no-value-for-parameter\n","repo_name":"populationgenomics/analysis-runner","sub_path":"scripts/copy_sample_cram_to_release.py","file_name":"copy_sample_cram_to_release.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"91"} +{"seq_id":"25910321865","text":"import locale\nimport time\nimport os\nfrom math import log10\n\n#try:\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter.messagebox import askyesnocancel, showerror\nfrom urllib.parse import unquote\n\nPATH = os.path.dirname(__file__)\n\n# --- images\nIM_HOME = os.path.join(PATH, \"images\", \"home.png\")\nIM_FOLDER = os.path.join(PATH, \"images\", \"dossier.png\")\nIM_FOLDER_LINK = os.path.join(PATH, \"images\", \"dossier_link.png\")\nIM_NEW = os.path.join(PATH, \"images\", \"new_folder.png\")\nIM_FILE = os.path.join(PATH, \"images\", \"file.png\")\nIM_FILE_LINK = os.path.join(PATH, \"images\", \"file_link.png\")\nIM_DRIVE = os.path.join(PATH, \"images\", \"drive.png\")\nIM_RECENT = os.path.join(PATH, \"images\", \"recent.png\")\nIM_RECENT_24 = os.path.join(PATH, \"images\", \"recent_24.png\")\n\n\ndef _(text):\n return text\n\n\nSIZES = [(_(\"B\"), 1), (\"kB\", 1e3), (\"MB\", 1e6), (\"GB\", 1e9), (\"TB\", 1e12)]\n\nTODAY = time.strftime(\"%x\")\nYEAR = time.strftime(\"%Y\")\nDAY = int(time.strftime(\"%j\"))\n\n\n# --- functions\ndef add_trace(variable, mode, callback):\n try:\n return variable.trace_add(mode, callback)\n except AttributeError:\n return variable.trace(mode[0], callback)\n\n\ndef remove_trace(variable, mode, cbname):\n try:\n variable.trace_remove(mode, cbname)\n except AttributeError:\n variable.trace_vdelete(mode[0], cbname)\n\n\ndef get_modification_date(file):\n \"\"\"Return the modification date of file.\"\"\"\n tps = time.localtime(os.path.getmtime(file))\n date = time.strftime(\"%x\", tps)\n if date == TODAY:\n date = _(\"Today\") + time.strftime(\" %H:%M\", tps)\n elif time.strftime(\"%Y\", tps) == YEAR and (DAY - int(time.strftime(\"%j\", tps))) < 7:\n date = time.strftime(\"%A %H:%M\", tps)\n return date\n\n\ndef get_size(file):\n \"\"\"Return the size of file.\"\"\"\n size_o = os.path.getsize(file)\n if size_o > 0:\n m = int(log10(size_o) // 3)\n if m < len(SIZES):\n unit, div = SIZES[m]\n else:\n unit, div = SIZES[-1]\n size = \"%s %s\" % (locale.format_string(\"%.1f\", size_o / div), unit)\n else:\n size = \"0 \" + _(\"B\")\n return size\n","repo_name":"frank038/tkpdfreader","sub_path":"pdfreader/tkfilebrowser/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"31030142376","text":"import numpy as np\nimport pandas as pd\n\ndataset2 = pd.read_csv(\"dataset2.csv\", header=\"infer\")\ninputs = np.array(dataset2[[\"x1\"]])\noutputs = np.array(dataset2[\"y\"])\n\ndef f(input):\n return input[0]**2\n\nsq_errors = np.zeros(len(outputs))\nfor i in range(len(outputs)):\n error = 
f(inputs[i]) - outputs[i]\n sq_errors[i] = error**2\nprint(np.mean(sq_errors))","repo_name":"DuBose-Tuller/CSC370_HW2","sub_path":"testMSE.py","file_name":"testMSE.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"7649108832","text":"import json\nimport os\n\nimport requests\nfrom pkg_resources import resource_filename\n\nnames_dictionary_file = os.path.join(resource_filename('randomcsv.resources.dictionaries', ''), 'firstNames.txt')\napi_url = 'https://uinames.com/api/'\n\n\ndef getNamesBatch(count=500):\n response = requests.get(f'{api_url}?amount={count}')\n data = json.loads(response.text)\n return data\n\n\ndef print_first_names(name_dicts):\n with open(names_dictionary_file, 'w') as file:\n for name in name_dicts:\n if name['name'].strip():\n file.write(name['name'] + '\\n')\n\n\nif __name__ == '__main__':\n names = getNamesBatch()\n print_first_names(names)\n","repo_name":"PhilipBuhr/randomCsv","sub_path":"src/spider/NameSpider.py","file_name":"NameSpider.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"3427048925","text":"from twisted.internet.defer import inlineCallbacks\n\nfrom Tribler.community.gigachannel.community import ChannelDownloadCache, GigaChannelCommunity\nfrom Tribler.pyipv8.ipv8.peer import Peer\nfrom Tribler.pyipv8.ipv8.test.base import TestBase\nfrom Tribler.Test.mocking.channel import MockChannel\nfrom Tribler.Test.mocking.download import MockDownload\nfrom Tribler.Test.mocking.session import MockSession\n\n\nclass TestGigaChannelUnits(TestBase):\n\n \"\"\"\n Unit tests for the GigaChannel community which do not need a real Session.\n \"\"\"\n\n def setUp(self):\n super(TestGigaChannelUnits, self).setUp()\n self.session = MockSession()\n\n self.initialize(GigaChannelCommunity, 1)\n\n def create_node(self, *args, **kwargs):\n kwargs['tribler_session'] = self.session\n return super(TestGigaChannelUnits, self).create_node(*args, **kwargs)\n\n def _setup_fetch_next(self):\n \"\"\"\n Setup phase for fetch_next() tests.\n\n Provides:\n - Database entry for a mocked Channel.\n - download_channel() functionality for the mocked channel.\n - Pending overlay.download_queue for the mocked channel.\n \"\"\"\n channel, download = self._setup_download_completed()\n self.session.lm.set_download_channel(download)\n self.nodes[0].overlay.download_queue = [channel.infohash]\n\n return channel, download\n\n def _setup_download_completed(self):\n \"\"\"\n Setup phase for the download_completed() tests.\n\n Provides:\n - Database entry for a mocked Channel.\n - Mocked (empty) download_channel() functionality.\n \"\"\"\n channel = MockChannel('\\x00' * 20, 'LibNaCLPK:' + '\\x00' * 64, 'test', 1, 0)\n self.session.lm.mds.ChannelMetadata.add(channel)\n download = MockDownload()\n download.tdef.set_infohash(channel.infohash)\n\n return channel, download\n\n def test_select_random_none(self):\n \"\"\"\n No entries in the database should yield no results.\n \"\"\"\n channel_list = []\n self.session.lm.mds.ChannelMetadata.set_random_channels(channel_list)\n\n entries = self.nodes[0].overlay.get_random_entries()\n\n self.assertEqual(0, len(entries))\n\n def test_select_random_one(self):\n \"\"\"\n One entry in the database should yield one result.\n \"\"\"\n channel_list = [MockChannel('\\x00' * 20, 'LibNaCLPK:' + '\\x00' * 64, 'test', 1, 0)]\n 
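# Seed the metadata store with exactly one channel; get_random_entries() should return it.\n 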
self.session.lm.mds.ChannelMetadata.set_random_channels(channel_list)\n\n entries = self.nodes[0].overlay.get_random_entries()\n\n self.assertEqual(1, len(entries))\n self.assertEqual(entries[0].infohash, channel_list[0].infohash)\n self.assertEqual(entries[0].public_key, channel_list[0].public_key[10:])\n self.assertEqual(entries[0].title, channel_list[0].title)\n self.assertEqual(entries[0].version, channel_list[0].version)\n\n def test_select_random_many(self):\n \"\"\"\n Six entries in the database should yield six results.\n \"\"\"\n channel_list = [MockChannel('\\x00' * 20, 'LibNaCLPK:' + '\\x00' * 64, 'test', 1, 0)] * 6\n self.session.lm.mds.ChannelMetadata.set_random_channels(channel_list)\n\n entries = self.nodes[0].overlay.get_random_entries()\n\n self.assertEqual(6, len(entries))\n for entry in entries:\n self.assertEqual(entry.infohash, channel_list[0].infohash)\n self.assertEqual(entry.public_key, channel_list[0].public_key[10:])\n self.assertEqual(entry.title, channel_list[0].title)\n self.assertEqual(entry.version, channel_list[0].version)\n\n def test_select_random_too_many(self):\n \"\"\"\n Ten entries in the database should be capped at seven results.\n \"\"\"\n channel_list = [MockChannel('\\x00' * 20, 'LibNaCLPK:' + '\\x00' * 64, 'test', 1, 0)] * 10\n self.session.lm.mds.ChannelMetadata.set_random_channels(channel_list)\n\n entries = self.nodes[0].overlay.get_random_entries()\n\n self.assertEqual(7, len(entries))\n for entry in entries:\n self.assertEqual(entry.infohash, channel_list[0].infohash)\n self.assertEqual(entry.public_key, channel_list[0].public_key[10:])\n self.assertEqual(entry.title, channel_list[0].title)\n self.assertEqual(entry.version, channel_list[0].version)\n\n def test_update_with_download(self):\n \"\"\"\n Test if an update with a download extracts the seeder count as votes.\n \"\"\"\n channel, download = self._setup_download_completed()\n\n self.assertEqual(0, channel.votes)\n\n self.nodes[0].overlay.update_from_download(download)\n\n self.assertEqual(42, channel.votes)\n\n def test_download_completed_no_token(self):\n \"\"\"\n Test if the download completed callback extracts the seeder count as votes.\n \"\"\"\n channel, download = self._setup_download_completed()\n\n self.assertEqual(0, channel.votes)\n\n self.nodes[0].overlay.download_completed(download)\n\n self.assertEqual(42, channel.votes)\n\n def test_download_completed_with_token(self):\n \"\"\"\n Test if the download completed callback releases the download token.\n \"\"\"\n channel, download = self._setup_download_completed()\n\n token = ChannelDownloadCache(self.nodes[0].overlay.request_cache)\n self.nodes[0].overlay.request_cache.add(token)\n\n self.nodes[0].overlay.download_completed(download)\n\n self.assertFalse(self.nodes[0].overlay.request_cache.has(token.prefix, token.number))\n\n def test_fetch_next_no_token(self):\n \"\"\"\n Test if nothing happens when we fetch the next download without holding the download token.\n \"\"\"\n channel, download = self._setup_fetch_next()\n\n token = ChannelDownloadCache(self.nodes[0].overlay.request_cache)\n self.nodes[0].overlay.request_cache.add(token)\n\n self.nodes[0].overlay.fetch_next()\n\n self.nodes[0].overlay.request_cache.pop(token.prefix, token.number)\n\n self.assertEqual(1, len(self.nodes[0].overlay.download_queue))\n\n def test_fetch_next_already_known(self):\n \"\"\"\n Test if we throw out a download when we fetch a download we already know.\n \"\"\"\n channel, download = self._setup_fetch_next()\n 
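# Mark the infohash as already known so fetch_next() should discard the queued channel.\n 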
self.session.add_known_infohash(channel.infohash)\n\n self.nodes[0].overlay.fetch_next()\n\n self.assertEqual(0, len(self.nodes[0].overlay.download_queue))\n\n @inlineCallbacks\n def test_fetch_next(self):\n \"\"\"\n Test if we download a channel if we have nothing else to do.\n \"\"\"\n channel, download = self._setup_fetch_next()\n\n self.nodes[0].overlay.fetch_next()\n\n self.assertTrue(self.session.lm.downloading)\n\n self.assertEqual(0, channel.votes)\n\n self.session.lm.finish_download_channel()\n\n yield self.session.lm.downloaded_channel_deferred\n\n self.assertFalse(self.session.lm.downloading)\n self.assertEqual(42, channel.votes)\n\n @inlineCallbacks\n def test_send_random_to_known_new(self):\n \"\"\"\n Test if we add a download to the queue when we get sent an unknown channel.\n \"\"\"\n channel = MockChannel('\\x00' * 20, 'LibNaCLPK:' + '\\x00' * 64, 'test', 1, 0)\n self.session.lm.mds.ChannelMetadata.set_random_channels([channel])\n\n self.nodes[0].overlay.send_random_to(Peer(self.nodes[0].my_peer.public_key, self.nodes[0].endpoint.wan_address))\n\n yield self.deliver_messages()\n\n self.assertEqual(1, len(self.nodes[0].overlay.download_queue))\n self.assertIn(channel.infohash, self.nodes[0].overlay.download_queue)\n\n @inlineCallbacks\n def test_send_random_to_known_update(self):\n \"\"\"\n Test if we queue an update when we get sent a newer version of a known channel.\n \"\"\"\n old_channel = MockChannel('\\x00' * 20, 'LibNaCLPK:' + '\\x00' * 64, 'test', 1, 0)\n self.session.lm.mds.ChannelMetadata.add(old_channel)\n new_channel = MockChannel('\\x01' * 20, 'LibNaCLPK:' + '\\x00' * 64, 'test', 2, 0)\n self.session.lm.mds.ChannelMetadata.set_random_channels([new_channel])\n\n self.nodes[0].overlay.send_random_to(Peer(self.nodes[0].my_peer.public_key, self.nodes[0].endpoint.wan_address))\n\n yield self.deliver_messages()\n\n self.assertEqual(1, len(self.nodes[0].overlay.download_queue))\n self.assertIn(old_channel.infohash, self.nodes[0].overlay.download_queue)\n self.assertEqual(old_channel.infohash, new_channel.infohash)\n","repo_name":"sumonst21/tribler","sub_path":"Tribler/Test/Community/gigachannel/test_community.py","file_name":"test_community.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"15303670775","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"UNIVERSIDAD AUTÓNOMA DEL ESTADO DE MÉXICO\nCU UAEM ZUMPANGO\nUA: Computational Graphics (Graficación computacional)\nTopic: Translation\nStudent: Keren Mitsue Ramírez Vergara\nProfessor: Manuel Almeida Vázquez\nDescription: Generate an animation of the translation of a figure,\n showing each of the translations.\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time as tm\n\n\nP = np.array([ [2,3],[-4,7], [1,-5],[2,3] ], dtype=float)\nh = 0.5\nk = -3\nplt.axis([-5,12,-6,10]) # keep the axes fixed\n\nfor i in range(1,10):\n plt.plot(P[:,0] + i*h, P[:,1])\n plt.axis('off')\n plt.pause(0.9)\n\ntm.sleep(2)\n","repo_name":"kerenmitsue18/Computational-Graphics","sub_path":"Transformaciones/Triangulo/trans_animation.py","file_name":"trans_animation.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"12292473851","text":"from ebooklib import epub\nimport os\n\ndef create_epub(epub_title, language, author, identifier, chapters_path, interval_size, initial_chapter):\n intervals = 
get_chapters(chapters_path, interval_size, initial_chapter)\n for interval in intervals:\n \n book = epub.EpubBook()\n \n book.set_title(epub_title)\n book.set_language(language)\n book.add_author(author)\n book.set_identifier(identifier)\n book.set_cover('cover.jpg', open(f'{chapters_path}/cover/cover.jpg', 'rb').read())\n\n chapters_ebook = []\n pages = []\n\n page_style = '''\n body { \n margin: 0; \n padding:0;\n display: flex;\n justify-content: center;\n align-items: center;\n height: 100%;\n overflow: hidden;\n } \n img { \n width: 100%; \n height: 100%; \n object-fit: fill;\n }\n \n '''\n\n page_css = epub.EpubItem(uid=\"page_styles\",\n file_name=\"page.css\",\n media_type=\"text/css\",\n content=page_style)\n book.add_item(page_css)\n \n chapter_style = '''\n body {\n margin: 0;\n padding: 0;\n text-align: center;\n height: 100%;\n width: 100%;\n vertical-align: middle;\n overflow: hidden;\n }\n\n img {\n position: absolute;\n top: 0;\n left: 0;\n width: 100%;\n height: 100%;\n object-fit: fill;\n pointer-events: none;\n opacity: 0.3;\n }\n\n h1 {\n text-align: center;\n font-size: 50px;\n font-family: 'Impact'\n margin-top: 40vh;\n background-color: white;\n padding: 20px;\n color: black;\n display: inline-block;\n opacity: 1.0;\n position: relative;\n z-index: 1; \n border-radius: 10px;\n }\n '''\n\n chapter_css = epub.EpubItem(uid=\"style_chapter\",\n file_name=\"chapter.css\",\n media_type=\"text/css\",\n content=chapter_style)\n book.add_item(chapter_css)\n\n chap_cover_epub = epub.EpubImage(\n uid='chap_cover',\n file_name='chap_cover.jpg',\n media_type='image/jpeg',\n content=open(f'{chapters_path}/cover/cover.jpg', 'rb').read()\n )\n book.add_item(chap_cover_epub)\n \n for chapter in interval:\n title = chapter['title']\n\n page_chapter = epub.EpubHtml(title=title,\n file_name=f'{title}.xhtml',\n content=f'\"chap_cover\"
/>
    <h1>{title}</h1>
    ')\n\n \n page_chapter.add_item(chapter_css)\n book.add_item(page_chapter)\n chapters_ebook.append(page_chapter)\n pages.append(page_chapter)\n\n images = get_images(chapter['chapter_path'])\n\n for i, image in enumerate(images):\n path = image['path'].split(\"\\\\\")[-1]\n image_epub = epub.EpubImage(\n uid=path,\n file_name=f'{path}',\n media_type='image/jpeg',\n content=image['content']\n )\n container_image = epub.EpubHtml(uid=f'{title}_{i}',title=f'Página {i}', file_name=f'{title}-{i}.xhtml')\n container_image.set_content(f'')\n \n container_image.add_item(page_css)\n book.add_item(container_image)\n book.add_item(image_epub)\n pages.append(container_image)\n\n style = 'body { font-family: Times, Times New Roman, serif; }'\n\n nav_css = epub.EpubItem(uid=\"style_nav\",\n file_name=\"style/nav.css\",\n media_type=\"text/css\",\n content=style)\n book.add_item(nav_css)\n\n book.toc = chapters_ebook\n\n spine = ['nav']\n for page in pages:\n spine.append(page)\n\n book.spine = spine\n book.add_item(epub.EpubNcx())\n book.add_item(epub.EpubNav())\n\n if not os.path.exists('epubs'):\n os.makedirs('epubs')\n if not os.path.exists(f'epubs/{epub_title}'):\n os.makedirs(f'epubs/{epub_title}')\n\n epub.write_epub(f'epubs/{epub_title}/{epub_title}_{interval[0][\"title\"]}-{interval[len(interval)-1][\"title\"]}.epub', book)\n \n\ndef is_convertible_to_float(value):\n try:\n float(value)\n return True\n except ValueError:\n return False\n\ndef get_chapters(chapters_path, interval_size, initial_chapter):\n intervals = []\n initial_chapter = initial_chapter - 1\n chapters_dir = []\n\n for value in os.listdir(chapters_path):\n try:\n float(value)\n chapters_dir.append(value)\n except ValueError:\n pass\n\n chapters_dir = sorted(chapters_dir, key=lambda x: float(x))\n end_chapter = min(interval_size + initial_chapter, len(chapters_dir))\n\n while initial_chapter < len(chapters_dir):\n chapters = []\n\n chapters = chapters_dir[initial_chapter:end_chapter]\n\n interval = []\n for chapter in chapters:\n chapters_dir_path = os.path.join(chapters_path, chapter)\n if os.path.isdir(chapters_dir_path):\n interval.append({\n 'title': f'Capítulo {chapter}',\n 'chapter_path': chapters_dir_path\n })\n\n intervals.append(interval)\n\n initial_chapter += interval_size\n end_chapter = min(end_chapter + interval_size, len(chapters_dir))\n return intervals\n\ndef get_images(chapter_dir):\n images = []\n\n for image_file in sorted(os.listdir(chapter_dir)):\n image_path = os.path.join(chapter_dir, image_file)\n if os.path.isfile(image_path):\n with open(image_path, 'rb') as file:\n content = file.read()\n images.append({\n 'path': image_path,\n 'content': content\n })\n\n return images \n","repo_name":"GuilhermeGomes1929/manga_extractor","sub_path":"epub_service.py","file_name":"epub_service.py","file_ext":"py","file_size_in_byte":6710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"25683909864","text":"import os\n\nimport discord\nfrom discord.ext import commands\n\n\nclass DiscordClient(commands.Bot):\n def __init__(self):\n super().__init__(\n command_prefix=commands.when_mentioned_or(\"!\"),\n intents=discord.Intents.all(),\n help_command=commands.DefaultHelpCommand(dm_help=True),\n )\n\n async def setup_hook(self):\n print(f\"Logged in as {self.user.name}\")\n for folder in os.listdir(\"modules\"):\n if os.path.exists(os.path.join(\"modules\", folder, \"cog.py\")):\n await self.load_extension(f\"modules.{folder}.cog\")\n print(\"Loaded 
cogs\")\n","repo_name":"salsasteve/gottithebot","sub_path":"backend/app/DiscordClient.py","file_name":"DiscordClient.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"20561477444","text":"from TMotorManager import TMotorManager\nimport numpy as np\nfrom SoftRealtimeLoop import SoftRealtimeLoop\n\n\n\nwith TMotorManager(motor_type='AK80-9', motor_ID=3, CSV_file=\"log.csv\") as motor3:\n motor3.power_on()\n motor3.zero_position() # has a delay!\n motor3.set_impedance_gains_real_unit(K=10,B=0.5)\n \n loop = SoftRealtimeLoop(dt = 0.01, report=True, fade=0)\n for t in loop:\n motor3.update()\n if t < 1.0:\n motor3.i = 0.0\n elif t < 4:\n motor3.i = 0.25\n\n del loop","repo_name":"mitry-anderson/TControl","sub_path":"src/TMotorCANControl/old/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"39687678645","text":"#!/usr/bin/env python3\n# ศิลาลักษณ์ แก้วจันทร์เพชร\n# 610510670\n# Lab 07\n# Problem 3\n# 204111 Sec 001\n\n\ndef main():\n n = int(input(\"\"))\n triangle(n)\n\n\ndef triangle(n):\n for i in range(n):\n for j in range(n):\n if (i == j):\n print(\"*\", end=\" \")\n elif (i == (n - 1)):\n print(\"*\", end=\" \")\n elif (j == 0):\n print(\"*.\", end=\" \")\n elif (i < j):\n print(\" \", end=\" \")\n else:\n print(\".\", end=\" \")\n\n print()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"KALALIZ/PythonCode","sub_path":"Term1/Lab07/Lab07_3_610510670.py","file_name":"Lab07_3_610510670.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70579996464","text":"\"\"\"\nDifferent approaches for matrix factorization.\nThe methods should all be accessed through the factory method 'factorize_matrix'.\n\nFor a list of available methods run print_methods() or just look at the _mfs_factory dictionary.\n\nAuthor: Moshe Lichman\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nimport abc\nimport time\n\nfrom utils import log_utils as log\nfrom utils import file_utils as fu\n\nfrom os.path import join\nfrom scipy.sparse.linalg import svds\nfrom sklearn.decomposition import NMF as sk_NMF\n\n\nclass _MFS(object):\n \"\"\"\n Template for a matrix factorization class.\n \"\"\"\n @abc.abstractmethod\n def get_factorized_mat(self, data, dim, area):\n raise NotImplementedError\n\n\nclass _NMF(_MFS):\n \"\"\"\n The latent space is found using Poission matrix factorization.\n\n Currently we're using the most out-of-the-box run of NMF. This could be investigated further.\n \"\"\"\n def get_factorized_mat(self, data, dim, area):\n log.info('Running sklearn NMF')\n start = time.time()\n\n model = sk_NMF(n_components=dim, init='random', random_state=0)\n W = model.fit_transform(data.toarray()) # It can't run on the sparse representation :(\n H = model.components_\n mf = np.dot(W, H)\n\n log.info('Factorizing took %d seconds' % (time.time() - start))\n return mf\n\n\nclass _HBNMF(_MFS):\n \"\"\"\n Hierarchical Bayes Poisson MF. 
We use the code that was published with the paper.\n The factorization is done in the c++ code and here we simply load the two matrices and combine\n them into the mf.\n\n NOTE: IOError will be raised if the files are not in their root directory.\n \"\"\"\n\n def get_factorized_mat(self, data, dim, area):\n log.info('Loading hierarchical bayes NMF')\n\n start = time.time()\n I, L = data.shape\n\n assert area is not None\n\n root_dir = '/extra/disij0/data/person_mf/%s/hier_nmf' % area\n\n htheta = fu.load_np_txt(join(root_dir, 'htheta.tsv'), delimiter='\\s\\s\\s\\s')\n htheta = self._fix_projection(htheta, I, dim)\n\n hbeta = fu.load_np_txt(join(root_dir, 'hbeta.tsv'), delimiter='\\s\\s\\s\\s')\n hbeta = self._fix_projection(hbeta, L, dim)\n\n mf = htheta.dot(hbeta.T)\n\n log.info('Factorizing took %d seconds' % (time.time() - start))\n return mf\n\n\n def _fix_projection(self, mat, items, dim):\n \"\"\"\n Both matrices needs to be fixed first. In the c++ code they ignore locations with\n no data and they change the entire projection.\n\n INPUT:\n -------\n 1. mat: <( <= items, dim + 2) ndarray> htheta or hbeta. There could be less rows than items because\n in the cpp code if a location didn't have data in training they\n remove it. I don't.\n Each row is [their_id, my_id, [factor_values]]\n 2. items: number of individual or location.\n 3. dims: number of hidden latent space.\n\n OUTPUT:\n --------\n 1. fixed: <(items, dim) ndarray> fixed projection matrix\n \"\"\"\n fixed = np.zeros([items, dim])\n my_ids = mat[:,1] - 1 # Because Disi added 1 to my 0'based projection\n values = mat[:,2]\n\n for i in range(mat.shape[0]):\n fixed[my_ids[i]] = values[i]\n\n return fixed\n\n\nclass _SVD(_MFS):\n \"\"\"\n The latent space is found using \"greedy\" svd, finding the largest dim components.\n \"\"\"\n def get_factorized_mat(self, data, dim, area):\n log.info('Running numpy svds')\n start = time.time()\n\n # For SVD we need to remove the mean from the data.\n tmp = np.copy(np.array(data.toarray()))\n m = np.mean(tmp, axis=0)\n tmp -= m\n\n u, s, v = svds(tmp, dim)\n W = u\n H = np.dot(np.diag(s), v)\n mf = np.dot(W, H)\n mf += m\n log.info('Factorizing took %d seconds' % (time.time() - start))\n\n return mf\n\n\nclass _Memory(_MFS):\n \"\"\"\n No latent space, this is memory bases. No smoothing\n \"\"\"\n def get_factorized_mat(self, data, dim, area):\n temp = np.array(data.toarray())\n #alpha = 0.01 # alpha is smoothing parameter\n #temp = temp * (1-alpha) + np.mean(temp,axis = 0)*alpha \n return temp\n\nclass _S_Memory(_MFS):\n \"\"\"\n No latent space, this is memory bases. 
Smoothed by column.\n \"\"\"\n def get_factorized_mat(self, data, dim, area):\n temp = np.array(data.toarray())\n alpha = 0.01 # alpha is smoothing parameter\n temp = temp * alpha + np.mean(temp,axis = 0)*(1-alpha) \n return temp\n\nclass _Popularity(_MFS):\n \"\"\"\n Return popularity of places\n \"\"\"\n def get_factorized_mat(self, data, dim, area):\n temp = np.array(data.toarray())\n P = np.zeros(data.shape)\n P += np.mean(temp,axis = 0)\n return P\n\n\n\n\"\"\"\n*******************************************************************************************\n FACTORY METHODS\n*******************************************************************************************\n\"\"\"\n_mfs_factory = {'hbnmf': _HBNMF, 'svd': _SVD, 'nmf': _NMF, 'memory': _Memory, 's_memory': _S_Memory, 'popularity': _Popularity}\n\n\ndef print_methods():\n \"\"\"\n Prints the available methods.\n \"\"\"\n log.info('Available MF methods: %s' % list(_mfs_factory.keys()))\n\n\ndef factorize_matrix(method, data, dim=None, area=None):\n \"\"\"\n Find latent space representation for the counts data and return the factorized matrix.\n The factorized matrix is a product of W * H' where W and H ar the latent space representation of the\n individuals and location respectively.\n\n INPUT:\n -------\n 1. method: matrix factorization method.\n 2. data: <(I, L) csr_mat> counts matrix.\n 3. dim: number of hidden latent dimensions.\n\n OUTPUT:\n --------\n 1. mf: <(I, L) ndarray> matrix factorization for the data.\n\n RAISE:\n -------\n 1. NotImplementedError: Method was not implemented (not in _\n \"\"\"\n method = method.lower()\n if method not in _mfs_factory:\n raise NotImplementedError('Method %s not implemented. Use %s instead.' % (method, list(_mfs_factory.keys())))\n\n mf = _mfs_factory[method]()\n return mf.get_factorized_mat(data, dim, area)\n\n","repo_name":"disiji/person_mf_smooth","sub_path":"mfs.py","file_name":"mfs.py","file_ext":"py","file_size_in_byte":6619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"34698697122","text":"def makeTuples(a):\n aList = list()\n for i in a:\n p1 = [(i,j) for j in a]\n aList = aList + p1\n return aList\n\ndef makeBlockedList():\n iNums = [1,3,6,8,11,13]\n iNums2 = [5,9]\n iNums3 = [6,8]\n aList = makeTuples(iNums)\n bList = makeTuples(iNums2)\n cList = makeTuples(iNums3)\n return aList + bList + cList\n\ndef addTuples(tup1,tup2):\n return tuple(sum(x) for x in zip(tup1, tup2))\n\ndef checkBounds(coords,size):\n if -1 in coords:\n return False\n for i in coords:\n if i >= size:\n return False\n return True\n\ndef validMove(player,direction,size):\n bl = makeBlockedList()\n coords = player[\"location\"]\n nCoords = addTuples(coords,direction)\n if(checkBounds(nCoords,size)):\n if nCoords in bl:\n return False\n player[\"location\"] = nCoords\n return True\n return False\n\ndef checkGoal(player,goal):\n if(goal == player[\"location\"]):\n print(\"Congrats you've reached the goal\")\n return True\n return False\n","repo_name":"jgomz217/arduino-escape","sub_path":"collegeRoom/movement.py","file_name":"movement.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"13026779375","text":"from . 
import AWSObject, AWSProperty\nfrom .validators import boolean\n\n\nclass Spend(AWSProperty):\n props = {\n 'Amount': (float, True),\n 'Unit': (basestring, True),\n }\n\n\nclass CostTypes(AWSProperty):\n props = {\n 'IncludeCredit': (boolean, False),\n 'IncludeDiscount': (boolean, False),\n 'IncludeOtherSubscription': (boolean, False),\n 'IncludeRecurring': (boolean, False),\n 'IncludeRefund': (boolean, False),\n 'IncludeSubscription': (boolean, False),\n 'IncludeSupport': (boolean, False),\n 'IncludeTax': (boolean, False),\n 'IncludeUpfront': (boolean, False),\n 'UseAmortized': (boolean, False),\n 'UseBlended': (boolean, False),\n }\n\n\nclass TimePeriod(AWSProperty):\n props = {\n 'End': (basestring, False),\n 'Start': (basestring, False),\n }\n\n\nclass BudgetData(AWSProperty):\n props = {\n 'BudgetLimit': (Spend, False),\n 'BudgetName': (basestring, False),\n 'BudgetType': (basestring, True),\n 'CostFilters': (dict, False),\n 'CostTypes': (CostTypes, False),\n 'TimePeriod': (TimePeriod, False),\n 'TimeUnit': (basestring, True),\n }\n\n\nclass Notification(AWSProperty):\n props = {\n 'ComparisonOperator': (basestring, True),\n 'NotificationType': (basestring, True),\n 'Threshold': (float, True),\n 'ThresholdType': (basestring, False),\n }\n\n\nclass Subscriber(AWSProperty):\n props = {\n 'Address': (basestring, True),\n 'SubscriptionType': (basestring, True),\n }\n\n\nclass NotificationWithSubscribers(AWSProperty):\n props = {\n 'Notification': (Notification, True),\n 'Subscribers': ([Subscriber], True),\n }\n\n\nclass Budget(AWSObject):\n resource_type = \"AWS::Budgets::Budget\"\n\n props = {\n 'Budget': (BudgetData, True),\n 'NotificationsWithSubscribers':\n ([NotificationWithSubscribers], False),\n }\n","repo_name":"FreeFlowOrg/researchflo","sub_path":"venv/lib/python2.7/site-packages/troposphere/budgets.py","file_name":"budgets.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"91"} +{"seq_id":"3991502388","text":"# pylint: disable=missing-docstring\n\nGOAL = \"finish\"\nINFINITY = float(\"inf\")\n\ngraph = {\n \"start\": {\"a\":6,\"b\":2},\n \"a\": {\"finish\":1},\n \"b\": {\"a\":3,\"finish\":5},\n \"finish\": {},\n}\n\ncosts = {\n \"a\":6,\n \"b\":2,\n \"finish\": INFINITY\n}\n\nparents = {\n \"start\": None,\n \"a\":\"start\",\n \"b\":\"start\",\n \"finish\": None\n}\n\nprocessed = []\n\ndef find_lowest_cost_node():\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node,cost in costs.items():\n if cost < lowest_cost and node not in processed:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node\n\ndef dijkstra():\n current_node = find_lowest_cost_node()\n while current_node is not None:\n cost = costs[current_node]\n neighbors = graph[current_node]\n for node in neighbors.keys():\n new_cost = cost + neighbors[node]\n if costs[node] > new_cost:\n costs[node] = new_cost\n parents[node] = current_node\n processed.append(current_node)\n current_node = find_lowest_cost_node()\n\ndijkstra()\n\ntrack_node = parents[GOAL]\nroute = [GOAL]\nwhile track_node:\n route.append(track_node)\n track_node = parents[track_node]\n\nroute.reverse()\nprint(f\"Route: {route}\")\nprint(f\"Minimum time: {costs[GOAL]}\")\n","repo_name":"devdiegocardoso/Studying","sub_path":"grokking_algorithms/graph 
algorithms/dijkstra_routes.py","file_name":"dijkstra_routes.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"70808471664","text":"#!/usr/bin/env python3\nimport itertools\nimport re\n\nimport utils\nfrom year_2019.day_05.part_a import InsufficientInputError\n\nfrom year_2019.day_05.part_b import get_program_result_and_output_extended\nfrom year_2019.day_17.part_a import DIRECTION_UP, DIRECTION_DOWN,\\\n DIRECTION_RIGHT, DIRECTION_LEFT, get_intersections,\\\n get_neighbour_positions, get_scaffolds_start_position_and_direction, \\\n OFFSET_MAP, parse_image\n# noinspection PyUnresolvedReferences\nimport year_2019.day_09.part_a\n\n\nclass Challenge(utils.BaseChallenge):\n def solve(self, _input, debug=False):\n \"\"\"\n >>> Challenge().default_solve()\n 880360\n \"\"\"\n image = get_image_from_program_text(_input)\n movement_commands_text = get_movement_commands_text_from_image(image)\n routine_commands_text, chunks = \\\n find_routine_and_functions_for_movement_commands_new(\n movement_commands_text)\n\n try:\n _, output = get_program_result_and_output_extended(\n _input, list(map(ord, (\n \"{}\\n\"\n \"{}\"\n \"n\\n\".format(\n routine_commands_text,\n \"\".join(map(\"{}\\n\".format, chunks)),\n )\n ))), substitutions={0: 2})\n except InsufficientInputError as e:\n if debug:\n print(\"\".join(map(chr, e.output_stream)))\n raise\n\n *message, result = output\n if debug:\n print(\"\".join(map(chr, message)))\n\n return result\n\n\ndef find_routine_and_functions_for_movement_commands_new(\n movement_commands_text, max_length=20):\n chunks = find_functions_for_movement_commands(\n movement_commands_text, max_length=max_length)[0]\n routine_commands = []\n function_names = ['A', 'B', 'C']\n remaining_movement_commands_text = movement_commands_text\n while remaining_movement_commands_text:\n matching_chunk, matching_function_name = next((\n (chunk, name)\n for chunk, name in zip(chunks, function_names)\n if remaining_movement_commands_text.startswith(chunk)\n ), (None, None))\n if matching_function_name is None:\n raise Exception(\n f\"Could not find any chunk from {chunks} at the start of \"\n f\"{remaining_movement_commands_text}\")\n routine_commands.append(matching_function_name)\n remaining_movement_commands_text = \\\n remaining_movement_commands_text[len(matching_chunk):]\n if remaining_movement_commands_text.startswith(','):\n remaining_movement_commands_text = \\\n remaining_movement_commands_text[1:]\n\n routine_commands_text = ','.join(routine_commands)\n return routine_commands_text, chunks\n\n\ndef find_functions_for_movement_commands(movement_commands_text, max_length=20):\n commands_portions = [movement_commands_text]\n first_chunk_and_commands_portions_list = [\n ((first_chunk,),\n replace_chunk_in_portions(commands_portions, first_chunk))\n for first_chunk in get_chunks(commands_portions, max_length)\n ]\n second_chunk_and_commands_portions_list = [\n ((first_chunk, second_chunk),\n replace_chunk_in_portions(first_commands_portions, second_chunk))\n for (first_chunk,), first_commands_portions\n in first_chunk_and_commands_portions_list\n for second_chunk in get_chunks(first_commands_portions, max_length)\n ]\n third_chunk_and_commands_portions_list = [\n ((first_chunk, second_chunk, third_chunk),\n replace_chunk_in_portions(second_commands_portions, third_chunk))\n for (first_chunk, second_chunk,), second_commands_portions\n in second_chunk_and_commands_portions_list\n for 
third_chunk in get_chunks(second_commands_portions, max_length)\n ]\n chunks_list = sorted({\n tuple(sorted(chunks))\n for chunks, third_commands_portions\n in third_chunk_and_commands_portions_list\n if not third_commands_portions\n })\n if not chunks_list:\n raise Exception(\"Could not find three commands to split\")\n\n return chunks_list\n\n\ndef replace_chunk_in_portions(commands_portions, chunk):\n return [\n command_portion.strip(',')\n for command_portion in filter(None, sum((\n re.compile(r'(?:,{0})+,'.format(re.escape(chunk)))\n .split(f\",{commands_portion},\")\n for commands_portion in commands_portions\n ), []))\n ]\n\n\ndef get_chunks(commands_portions, max_length=20):\n return sorted(\n chunk\n for chunk in {\n \",\".join(commands_portion.split(\",\")[:length])\n for commands_portion in commands_portions\n for length in range(1, (max_length + 1) // 2)\n }\n if len(chunk) <= max_length\n )\n\n\ndef find_routine_and_functions_for_movement_commands(movement_commands):\n \"\"\"\n >>> solution_a = ('A,B,C,B,A,C', {'A': 'R,8,R,8', 'B': 'R,4,R,4,R,8', \\\n 'C': 'L,6,L,2'})\n >>> movement_commands_a = get_movement_commands_text_from_image(\\\n \"#######...#####\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"......#...#...#\\\\n\"\\\n \"......#...###.#\\\\n\"\\\n \"......#.....#.#\\\\n\"\\\n \"^########...#.#\\\\n\"\\\n \"......#.#...#.#\\\\n\"\\\n \"......#########\\\\n\"\\\n \"........#...#..\\\\n\"\\\n \"....#########..\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#####......\")\n >>> routines_a = find_routine_and_functions_for_movement_commands(\\\n movement_commands_a)\n >>> set(routines_a[0][0]) == set('ABC,') if routines_a else None\n True\n >>> solution_a in routines_a\n True\n >>> all(\\\n expand_main_routine(routine_text, function_texts) \\\n == movement_commands_a \\\n for routine_text, function_texts in routines_a\\\n )\n True\n \"\"\"\n max_length = 80\n without_commas = movement_commands.replace(',', '')\n possible_functions_without_commas = [\n (a, b, c)\n for a, b, c, remaining_after_a_b_c in [\n (a, b, c, remaining_after_a_b_c)\n for a, b, remaining_after_a_b in [\n (a, b, remaining_after_a_b)\n for a, remaining_after_a in [\n (a, remaining_after_a)\n for a in (\n without_commas[:length_a]\n for length_a\n in range(1, min(max_length + 1, len(without_commas)))\n )\n for remaining_after_a\n in list(filter(None, without_commas.split(a))) or ['']\n ]\n for b in (\n remaining_after_a[:length_b]\n for length_b\n in range(1, min(max_length + 1, len(remaining_after_a)))\n )\n for remaining_after_a_b\n in list(filter(None, remaining_after_a.split(b))) or ['']\n ]\n for c in (\n remaining_after_a_b[:length_c]\n for length_c\n in range(1, min(max_length + 1, len(remaining_after_a_b)))\n )\n for remaining_after_a_b_c\n in list(filter(None, remaining_after_a_b.split(c))) or ['']\n ]\n ]\n all_possible_functions_without_commas = {\n (a, b, c)\n for triplet in possible_functions_without_commas\n if len(set(triplet)) == 3\n for a, b, c in itertools.permutations(triplet)\n }\n functions_without_commas = [\n (a, b, c)\n for a, b, c in all_possible_functions_without_commas\n if not (\n without_commas\n .replace(a, '|')\n .replace(b, '|')\n .replace(c, '|')\n .replace('|', '')\n )\n and len(\n without_commas\n .replace(a, 'A')\n .replace(b, 'B')\n .replace(c, 'C')\n ) <= max_length\n ]\n if not functions_without_commas:\n return []\n\n bracketed_routines_without_commas = {\n \"|\".join(sorted([a, b, c])): 
(\n without_commas\n .replace(a, 'A')\n .replace(b, 'B')\n .replace(c, 'C')\n .replace('A', f\"({a})\")\n .replace('B', f\"({b})\")\n .replace('C', f\"({c})\")\n )\n for a, b, c in functions_without_commas\n }\n\n all_permutations_of_functions_without_commas = {\n (a, b, c)\n for triplet in functions_without_commas\n for a, b, c in itertools.permutations(triplet)\n }\n\n def get_routine(a, b, c):\n _hash = \"|\".join(sorted([a, b, c]))\n bracketed_routine = bracketed_routines_without_commas[_hash]\n return \",\".join(\n bracketed_routine\n .replace(f\"({a})\", 'A')\n .replace(f\"({b})\", 'B')\n .replace(f\"({c})\", 'C')\n )\n\n return [\n (get_routine(a, b, c), {\n 'A': \",\".join(a),\n 'B': \",\".join(b),\n 'C': \",\".join(c),\n })\n for a, b, c in all_permutations_of_functions_without_commas\n ]\n\n\ndef get_image_from_program_text(program_text):\n _, output = get_program_result_and_output_extended(program_text, [])\n image = parse_image(output)\n\n return image\n\n\ndef get_movement_commands_text_from_image(image):\n \"\"\"\n >>> get_movement_commands_text_from_image(\\\n \"#######...#####\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"......#...#...#\\\\n\"\\\n \"......#...###.#\\\\n\"\\\n \"......#.....#.#\\\\n\"\\\n \"^########...#.#\\\\n\"\\\n \"......#.#...#.#\\\\n\"\\\n \"......#########\\\\n\"\\\n \"........#...#..\\\\n\"\\\n \"....#########..\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#####......\")\n 'R,8,R,8,R,4,R,4,R,8,L,6,L,2,R,4,R,4,R,8,R,8,R,8,L,6,L,2'\n >>> get_movement_commands_text_from_image(\n ... get_image_from_program_text(challenge.input))\n 'L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,6,R,12,L,10,L,4,L,6,L,6,R,12,L,\\\n6,L,10,L,10,L,4,L,6,R,12,L,10,L,4,L,6,L,10,L,10,L,4,L,6,L,6,R,12,L,6,L,10,L,\\\n10,L,4,L,6'\n \"\"\"\n scaffolds, start_position, start_direction = \\\n get_scaffolds_start_position_and_direction(image)\n movement_commands = get_movement_commands(\n get_scaffolds_order(scaffolds, start_position), start_direction)\n return \",\".join(map(str, movement_commands))\n\n\ndef expand_main_routine(routine_text, function_texts):\n \"\"\"\n >>> expand_main_routine('A,B,C,B,A,C', \\\n {'A': 'R,8,R,8', 'B': 'R,4,R,4,R,8', 'C': 'L,6,L,2'})\n 'R,8,R,8,R,4,R,4,R,8,L,6,L,2,R,4,R,4,R,8,R,8,R,8,L,6,L,2'\n \"\"\"\n routine = routine_text.split(',')\n functions = {\n name: function_text.split(',')\n for name, function_text in function_texts.items()\n }\n replaced_routine = sum((\n functions[name]\n for name in routine\n ), [])\n\n return ','.join(replaced_routine)\n\n\ndef get_movement_commands(scaffolds_order, start_direction):\n \"\"\"\n >>> _scaffolds, _start_position, _start_direction = \\\n get_scaffolds_start_position_and_direction(\\\n \"#######...#####\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"......#...#...#\\\\n\"\\\n \"......#...###.#\\\\n\"\\\n \"......#.....#.#\\\\n\"\\\n \"^########...#.#\\\\n\"\\\n \"......#.#...#.#\\\\n\"\\\n \"......#########\\\\n\"\\\n \"........#...#..\\\\n\"\\\n \"....#########..\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#####......\")\n >>> get_movement_commands(get_scaffolds_order(\\\n _scaffolds, _start_position), _start_direction)\n ['R', 8, 'R', 8, 'R', 4, 'R', 4, 'R', 8, 'L', 6, 'L', 2, 'R', 4, 'R', 4, \\\n'R', 8, 'R', 8, 'R', 8, 'L', 6, 'L', 2]\n >>> get_movement_commands(get_scaffolds_order(\\\n _scaffolds, _start_position), DIRECTION_RIGHT)\n [8, 'R', 
8, 'R', 4, 'R', 4, 'R', 8, 'L', 6, 'L', 2, 'R', 4, 'R', 4, \\\n'R', 8, 'R', 8, 'R', 8, 'L', 6, 'L', 2]\n \"\"\"\n directions = [\n DIRECTION_FROM_OFFSET_MAP[(get_offset(previous, current))]\n for previous, current in zip(scaffolds_order, scaffolds_order[1:])\n ]\n grouped_directions = [(start_direction, 1)] + [\n (direction, len(list(count)))\n for direction, count in itertools.groupby(directions)\n ]\n commands = sum((\n [get_direction_change_command(previous_direction, current_direction),\n current_count]\n for (previous_direction, _), (current_direction, current_count)\n in zip(grouped_directions, grouped_directions[1:])\n ), [])\n if not commands:\n return []\n\n first_command = commands[0]\n if not first_command:\n commands = commands[1:]\n\n return commands\n\n\nCOMMAND_RIGHT = \"R\"\nCOMMAND_LEFT = \"L\"\n\n\nDIRECTION_FROM_OFFSET_MAP = {\n (0, -1): DIRECTION_UP,\n (0, 1): DIRECTION_DOWN,\n (1, 0): DIRECTION_RIGHT,\n (-1, 0): DIRECTION_LEFT,\n}\n\n\nDIRECTION_CHANGE_TO_COMMAND_MAP = {\n DIRECTION_UP: {\n DIRECTION_UP: None,\n DIRECTION_RIGHT: COMMAND_RIGHT,\n DIRECTION_LEFT: COMMAND_LEFT,\n },\n DIRECTION_DOWN: {\n DIRECTION_DOWN: None,\n DIRECTION_RIGHT: COMMAND_LEFT,\n DIRECTION_LEFT: COMMAND_RIGHT,\n },\n DIRECTION_RIGHT: {\n DIRECTION_UP: COMMAND_LEFT,\n DIRECTION_DOWN: COMMAND_RIGHT,\n DIRECTION_RIGHT: None,\n },\n DIRECTION_LEFT: {\n DIRECTION_UP: COMMAND_RIGHT,\n DIRECTION_DOWN: COMMAND_LEFT,\n DIRECTION_LEFT: None,\n },\n}\n\n\ndef get_direction_change_command(previous, current):\n \"\"\"\n >>> get_direction_change_command(DIRECTION_UP, DIRECTION_RIGHT)\n 'R'\n >>> get_direction_change_command(DIRECTION_RIGHT, DIRECTION_UP)\n 'L'\n >>> get_direction_change_command(DIRECTION_RIGHT, DIRECTION_RIGHT)\n >>> get_direction_change_command(DIRECTION_RIGHT, DIRECTION_LEFT)\n Traceback (most recent call last):\n ...\n Exception: Cannot change directions from right to left\n >>> right_rotations = [DIRECTION_UP, DIRECTION_RIGHT, DIRECTION_DOWN, \\\n DIRECTION_LEFT, DIRECTION_UP]\n >>> {\\\n get_direction_change_command(_previous, _current) \\\n for _previous, _current in zip(right_rotations, right_rotations[1:])\\\n }\n {'R'}\n >>> left_rotations = list(reversed(right_rotations))\n >>> {\\\n get_direction_change_command(_previous, _current) \\\n for _previous, _current in zip(left_rotations, left_rotations[1:])\\\n }\n {'L'}\n >>> {\\\n get_direction_change_command(item, item) \\\n for item in left_rotations \\\n }\n {None}\n \"\"\"\n if current not in DIRECTION_CHANGE_TO_COMMAND_MAP[previous]:\n raise Exception(\n f\"Cannot change directions from {previous} to {current}\")\n\n return DIRECTION_CHANGE_TO_COMMAND_MAP[previous][current]\n\n\ndef get_scaffolds_order(scaffolds, start_position):\n \"\"\"\n >>> _scaffolds, _start_position, _ = \\\n get_scaffolds_start_position_and_direction(\\\n \"#######...#####\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"#.....#...#...#\\\\n\"\\\n \"......#...#...#\\\\n\"\\\n \"......#...###.#\\\\n\"\\\n \"......#.....#.#\\\\n\"\\\n \"^########...#.#\\\\n\"\\\n \"......#.#...#.#\\\\n\"\\\n \"......#########\\\\n\"\\\n \"........#...#..\\\\n\"\\\n \"....#########..\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#...#......\\\\n\"\\\n \"....#####......\")\n >>> _scaffolds_order = get_scaffolds_order(_scaffolds, _start_position)\n >>> _scaffolds_order\n [(0, 6), (1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6), (7, 6), (8, 6), \\\n(8, 7), (8, 8), (8, 9), (8, 10), (8, 11), (8, 12), (8, 13), (8, 14), (7, 14), \\\n(6, 14), (5, 14), (4, 
14), (4, 13), (4, 12), (4, 11), (4, 10), (5, 10), \\\n(6, 10), (7, 10), (8, 10), (9, 10), (10, 10), (11, 10), (12, 10), (12, 9), \\\n(12, 8), (12, 7), (12, 6), (12, 5), (12, 4), (11, 4), (10, 4), (10, 3), \\\n(10, 2), (10, 1), (10, 0), (11, 0), (12, 0), (13, 0), (14, 0), (14, 1), \\\n(14, 2), (14, 3), (14, 4), (14, 5), (14, 6), (14, 7), (14, 8), (13, 8), \\\n(12, 8), (11, 8), (10, 8), (9, 8), (8, 8), (7, 8), (6, 8), (6, 7), (6, 6), \\\n(6, 5), (6, 4), (6, 3), (6, 2), (6, 1), (6, 0), (5, 0), (4, 0), (3, 0), \\\n(2, 0), (1, 0), (0, 0), (0, 1), (0, 2)]\n >>> _scaffolds_order[0] == _start_position\n True\n >>> set(_scaffolds_order) == set(_scaffolds)\n True\n >>> all(\\\n get_offset(_previous, _current) in OFFSET_MAP.values() \\\n for _previous, _current \\\n in zip(_scaffolds_order, _scaffolds_order[1:]) \\\n )\n True\n \"\"\"\n intersections = get_intersections(scaffolds)\n intersections_left = set(intersections)\n position = start_position\n scaffolds_order = [position]\n scaffolds_left = set(scaffolds)\n scaffolds_left.remove(position)\n last_intersection_diff = None\n while scaffolds_left:\n neighbour_scaffolds = \\\n set(get_neighbour_positions(position)) & scaffolds_left\n if not neighbour_scaffolds:\n raise Exception(\n f\"There {len(scaffolds_left)} remaining scaffolds, but none \"\n f\"of them are near the current position {position} (order was \"\n f\"{scaffolds_order})\")\n if len(neighbour_scaffolds) > 1:\n if not last_intersection_diff:\n raise Exception(\n f\"There are many neighbours around {position} \"\n f\"({neighbour_scaffolds}), but we have no intersection \"\n f\"information (order was {scaffolds_order})\")\n last_intersection_diff_x, last_intersection_diff_y = \\\n last_intersection_diff\n previous_x, previous_y = position\n expected_next_position = \\\n previous_x + last_intersection_diff_x, \\\n previous_y + last_intersection_diff_y\n if expected_next_position not in neighbour_scaffolds:\n raise Exception(f\"Intersection suddenly stopped\")\n next_position = expected_next_position\n else:\n next_position, = neighbour_scaffolds\n if next_position in intersections_left:\n intersections_left.remove(next_position)\n previous_x, previous_y = position\n current_x, current_y = next_position\n last_intersection_diff = \\\n current_x - previous_x, current_y - previous_y\n else:\n scaffolds_left.remove(next_position)\n if position not in intersections:\n last_intersection_diff = None\n scaffolds_order.append(next_position)\n position = next_position\n\n return scaffolds_order\n\n\ndef get_offset(start, end):\n \"\"\"\n >>> get_offset((0, 0), (0, 0))\n (0, 0)\n >>> get_offset((0, 0), (0, 1))\n (0, 1)\n >>> get_offset((-3, 4), (-4, 4))\n (-1, 0)\n \"\"\"\n start_x, start_y = start\n end_x, end_y = end\n\n return end_x - start_x, end_y - start_y\n\n\nChallenge.main()\nchallenge = Challenge()\n","repo_name":"costas-basdekis/advent-of-code-submissions","sub_path":"year_2019/day_17/part_b.py","file_name":"part_b.py","file_ext":"py","file_size_in_byte":19122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"5224827031","text":"from recip.util.Serializable import Serializable\nfrom recip.util import DataType\nfrom recip.util import RLP\n\nclass IndexBlock(Serializable):\n def __init__(self):\n self.chainWork = None\n self.previousHash = None\n self.gasLimit = None\n self.height = None\n \n def serialize(self):\n item = [\n self.chainWork,\n self.previousHash,\n self.gasLimit,\n self.height\n ]\n return RLP.encode(item)\n \n 
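    # Note: deserialize() below mirrors serialize() above -- RLP.decode()\n    # returns the four fields in encoding order, and the int fields are\n    # restored via DataType.deserialize with a default of 0.\n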
def deserialize(self, buffer):\n decodedBuffer = RLP.decode(buffer)\n self.chainWork = DataType.deserialize(decodedBuffer[0], DataType.INT, 0)\n self.previousHash = decodedBuffer[1]\n self.gasLimit = DataType.deserialize(decodedBuffer[2], DataType.INT, 0)\n self.height = DataType.deserialize(decodedBuffer[3], DataType.INT, 0)","repo_name":"anthonybuckle/Reciprocity-Core","sub_path":"recip/core/IndexBlock.py","file_name":"IndexBlock.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"30206865107","text":"from __future__ import print_function\nimport torch\nimport numpy as np\nfrom PIL import Image\nimport os\nimport collections\n\n\nIMG_EXTENSIONS = [\n '.jpg', '.JPG', '.jpeg', '.JPEG',\n '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',\n]\n\n\ndef is_image_file(filename):\n return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)\n\n\n# Converts a Tensor into a Numpy array\n# |imtype|: the desired type of the converted numpy array\ndef tensor2im(image_tensor, mean=(0.5, 0.5, 0.5), img_mul=2.):\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = (np.transpose(image_numpy, (1, 2, 0)) / img_mul + np.array(mean)) * 255.0\n image_numpy = image_numpy.clip(0, 255)\n return np.around(image_numpy).astype(np.uint8)\n\n\ndef tensor2ims(tensor, imtype=np.uint8):\n b, c, h, w = tensor.size()\n tensor = tensor.view(-1, h, w)\n tensor = tensor.cpu().float().numpy()\n ims = []\n for i in range(tensor.shape[0]):\n im = tensor[i:i+1]\n im -= np.min(im)\n crange = np.max(im)\n if not crange == 0:\n im /= crange\n im *= 255\n ims.append(im.transpose((1, 2, 0)).astype(imtype))\n return ims\n\n\ndef patchify(img, patch_shape):\n img = np.ascontiguousarray(img) # won't make a copy if not needed\n X, Y = img.shape\n x, y = patch_shape\n # number of patches, patch_shape\n shape = ((X - x + 1), (Y - y + 1), x, y)\n # The right strides can be thought by:\n # 1) Thinking of `img` as a chunk of memory in C order\n # 2) Asking how many items through that chunk of memory are needed when indices\n # i,j,k,l are incremented by one\n strides = img.itemsize*np.array([Y, 1, Y, 1])\n return np.lib.stride_tricks.as_strided(img, shape=shape, strides=strides)\n\n\ndef mod_crop(im, scale):\n h, w = im.shape[:2]\n # return im[(h % scale):, (w % scale):, ...]\n return im[:h-(h % scale), :w-(w % scale), ...]\n\n\ndef cut_boundary(im1, scale):\n boundarypixels = 0\n if scale > 1:\n boundarypixels = scale\n im1 = im1[boundarypixels: -boundarypixels, boundarypixels: -boundarypixels, :]\n\n return im1\n\n\ndef diagnose_network(net, name='network'):\n mean = 0.0\n count = 0\n for param in net.parameters():\n if param.grad is not None:\n mean += torch.mean(torch.abs(param.grad.data))\n count += 1\n if count > 0:\n mean = mean / count\n print(name)\n print(mean)\n\n\ndef print_network(net):\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print(net)\n print('Total number of parameters: %d' % num_params)\n\n\ndef save_image(image_numpy, image_path, mode=None):\n image_pil = Image.fromarray(image_numpy, mode).convert('RGB')\n image_pil.save(image_path)\n\n\ndef info(object, spacing=10, collapse=1):\n \"\"\"Print methods and doc strings.\n Takes module, class, list, dictionary, or string.\"\"\"\n methodList = [e for e in dir(object) if isinstance(getattr(object, e), collections.Callable)]\n processFunc = collapse and (lambda s: \" \".join(s.split())) or (lambda s: s)\n 
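    # processFunc collapses runs of whitespace in each docstring when\n    # collapse is truthy; otherwise the docstring is printed verbatim.\n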
print(\"\\n\".join([\"%s %s\" %\n (method.ljust(spacing),\n processFunc(str(getattr(object, method).__doc__)))\n for method in methodList]))\n\n\ndef print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))\n\n\ndef mkdirs(paths):\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)\n\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef read_csv_as_list(csv_file):\n import csv\n try:\n with open(csv_file) as csvfile:\n reader = csv.DictReader(csvfile, dialect='excel', quoting=csv.QUOTE_NONNUMERIC)\n datalist = []\n datalist = list(reader)\n return datalist\n except FileNotFoundError as ex:\n raise ex\n\n\ndef truncate_list_of_dict(ld, key, upper_bound):\n \"\"\"truncate the list of dict according to dict[key] < value, assume the values are sorted\"\"\"\n if len(ld) == 0:\n return ld\n for i, d in enumerate(ld):\n if key in d and d[key] >= upper_bound:\n i -= 1\n break\n return ld[:(i+1)]\n\n\ndef write_list_to_csv(csv_file, data_list, csv_columns=None):\n import csv\n try:\n with open(csv_file, 'w') as csvfile:\n if len(data_list) > 0:\n fieldnames = csv_columns if csv_columns else list(data_list[-1].keys())\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames,\n restval=0.0, extrasaction='ignore', quoting=csv.QUOTE_NONNUMERIC)\n writer.writeheader()\n for data in data_list:\n writer.writerow(data)\n\n except OSError as ex:\n print(\"I/O error({0}): {1}\".format(ex.errno, ex.strerror))\n return\n\n\nclass Struct:\n '''The recursive class for building and representing objects with.'''\n def __init__(self, obj):\n for k, v in obj.items():\n if isinstance(v, dict):\n setattr(self, k, Struct(v))\n else:\n setattr(self, k, v)\n\n def __getitem__(self, val):\n return self.__dict__[val]\n\n def __repr__(self):\n return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for\n (k, v) in self.__dict__.items()))\n\n\ndef parse_config(defaults_dict, config_dict):\n # recusively update dict with u\n def update(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d\n z = defaults_dict.copy()\n update(z, config_dict)\n return Struct(z)\n\n\ndef print_current_errors(epoch, i, errors, t, log_name=None):\n message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)\n for k, v in errors.items():\n message += '%s: %.3f ' % (k, v)\n\n print(message)\n if log_name is not None:\n with open(log_name, \"a\") as log_file:\n log_file.write('%s\\n' % message)\n\n","repo_name":"PeterZhouSZ/proSR","sub_path":"lib/prosr/misc/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"31016689684","text":"############################################################\n# CMPSC 442: Homework 6\n############################################################\n\nstudent_name = \"Kangdong Yuan\"\n\n############################################################\n# Imports\n############################################################\n\n# Include your imports here, if any are used.\nimport re\nfrom collections import defaultdict\nimport math\n\n############################################################\n# Section 1: 
Hidden Markov Models\n############################################################\n\ndef load_corpus(path):\n result = []\n f = open(path, 'r')\n for line in f:\n temp = re.split(' |=', line.strip())\n result.append(list((temp[i], temp[i + 1]) for i in range(0, len(temp) - 1, 2)))\n f.close()\n return result\n\nclass Tagger(object):\n\n def __init__(self, sentences):\n self.sentences = sentences\n self.tag = {\"NOUN\": 0,\n \"VERB\": 0,\n \"ADJ\": 0,\n \"ADV\": 0,\n \"PRON\": 0,\n \"DET\": 0,\n \"ADP\": 0,\n \"NUM\": 0,\n \"CONJ\": 0,\n \"PRT\": 0,\n \".\": 0,\n \"X\": 0}\n partsofspeech=[\"NOUN\",\"VERB\",\"ADJ\",\"ADV\",\"PRON\",\"DET\",\"ADP\",\"NUM\",\"CONJ\",\"PRT\",\".\",\"X\"]\n self.partsofspeechtransition = {}\n postrans = self.partsofspeechtransition\n\n for tg in partsofspeech:\n for j in partsofspeech:\n self.partsofspeechtransition[tuple([tg, j])] = 0\n\n self.sample, tags, pairs = {}, [], {}\n sumtotal, sumtrans, laplace, check = 0, 0, 1e-10, 0\n\n possibleoutcomes = len(self.tag)\n pairs = self.tag.copy()\n\n for line in self.sentences:\n check, pos01 = check + 1, line[0][1]\n\n if pos01 in self.tag:\n self.tag[pos01], sumtotal = self.tag[pos01] + 1, sumtotal + 1\n\n samplekeys = self.sample.keys()\n for tok in line:\n\n if tok not in samplekeys:\n self.sample[tok] = 1\n\n elif tok in samplekeys:\n self.sample[tok] = self.sample[tok] + 1\n\n t1 = tok[1]\n if t1 in pairs:\n pairs[t1] = pairs[t1] + 1\n\n tags.append(t1)\n\n for t in self.tag:\n smtn, denom = self.tag[t] + laplace, sumtotal + possibleoutcomes*laplace\n self.tag[t] = smtn / denom\n\n ran = len(tags) - 1\n for tg in range(0, ran):\n tpair = tuple([tags[tg], tags[tg+1]])\n if tpair in postrans:\n postrans[tpair], sumtrans = postrans[tpair] + 1, sumtrans + 1\n\n for value in postrans:\n smtn, denom = laplace + postrans[value], len(postrans) * laplace + sumtrans\n postrans[value] = smtn / denom\n\n for k, tpair in self.sample.items():\n if k[1] in pairs:\n smtn, denom = self.sample[k] + laplace, pairs[k[1]] + (laplace * 12)\n self.sample[k] = smtn / denom\n CONSTANT = 3.14e-10\n\n begin = defaultdict(lambda : 0)\n default = defaultdict(lambda : 0)\n nex = defaultdict(lambda : default.copy())\n generate = defaultdict(lambda : default.copy())\n for s in sentences:\n begin[s[0][1]] += 1\n for i in range(len(s) - 1):\n j = i + 1\n nex[s[i][1]][\"total\"] += 1\n nex[s[i][1]][s[j][1]] += 1\n generate[s[i][0]][\"total\"] += 1\n generate[s[i][0]][s[i][1]] += 1\n generate[s[-1][0]][\"total\"] += 1\n generate[s[-1][0]][s[-1][1]] += 1\n\n self.emiss_probs = {}\n for w in generate:\n self.emiss_probs[w] = defaultdict(lambda : math.log((CONSTANT) / generate[w][\"total\"] + len(generate) * CONSTANT))\n for t in generate[w]:\n if t != \"total\":\n self.emiss_probs[w][t] = math.log((generate[w][t] + CONSTANT) / generate[w][\"total\"] + len(generate) * CONSTANT)\n \n def most_probable_tags(self, tokens):\n tags = []\n for t in tokens:\n if t in self.emiss_probs:\n tags.append(max(self.emiss_probs[t].items(), key = lambda x : x[1])[0])\n else:\n tags.append(max(self.emiss_probs[\"X\"].items(), key = lambda x : x[1])[0])\n return tags\n\n def alpha_helper(self, taglist, samplelist, laplace, tokens):\n dict_store = {}\n for v in taglist:\n start, end = (tokens[0], v), (0, v)\n if start not in samplelist:\n dict_store[end] = laplace\n if start in samplelist:\n dict_store[end] =taglist[v]*samplelist[start]\n return dict_store\n\n def toptag(self, tokens, alpha_para, location, taglist):\n most_tag = []\n for i in range(0, len(tokens)):\n 
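            # Scan every tag for position i, keeping the most probable one;\n            # \"X\" is the fallback when no tag scores above zero.\n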
start, end = 0, \"X\"\n for val in taglist:\n if alpha_para[(i, val)] > start:\n start, end = alpha_para[(i, val)], val\n most_tag += [end]\n for j in range(len(tokens) - 1, 0, -1):\n most_tag[j - 1] = location[(j, most_tag[j])]\n return most_tag\n\n def last_tag(self, taglist, finalre, alpha_para, final, i, j):\n for t in taglist:\n prob, position, pair = 0, self.partsofspeechtransition, (t, j)\n if pair in position:\n prob = alpha_para[(i - 1, t)]*position[(t, j)]\n if finalre < prob:\n finalre, final = prob, t\n return finalre, final\n\n def viterbi_tags(self, tokens):\n dist_para = 1e-10\n taglist = self.tag\n samplelist = self.sample\n alpha_para = self.alpha_helper(taglist, samplelist, dist_para, tokens)\n location = {}\n for i in range(1, len(tokens)):\n for j in taglist:\n finalre, final, tuple1 = 0, \"X\",(i, j)\n finalre, final = self.last_tag(taglist,finalre, alpha_para, final, i, j)\n if (tokens[i], j) in samplelist:\n alpha_para[tuple1] = finalre * samplelist[(tokens[i], j)]\n else:\n alpha_para[tuple1] = dist_para * finalre\n location[(i, j)] = final\n return self.toptag(tokens, alpha_para, location, taglist)\n\n# c = load_corpus(r\"C:\\Users\\yedkk\\Desktop\\CS442\\hw6\\brown-corpus.txt\")\n# t = Tagger(c)\n# s = \"I am waiting to reply\".split()\n# print(t.most_probable_tags(s))\n# print(t.viterbi_tags(s))","repo_name":"yedkk/artificial-intelligence-in-python","sub_path":"6. Hidden Markov Models/homework6_kky5082.py","file_name":"homework6_kky5082.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"91"} +{"seq_id":"71633501424","text":"\"\"\"\n Descrição: Utilitarios para o servidor\n Autores: Getulio Coimbra Regis e Igor Lara de Oliveira\n Creation Date: 26 / 10 / 2022\n\"\"\"\n\n# Tipo dos comandos\n\nCOMMAND_CREATE = 0x01\nCOMMAND_READ = 0x02\nCOMMAND_LIST = 0x03\nCOMMAND_UPDATE = 0x04\nCOMMAND_DELETE = 0x05\nCOMMAND_LISTALUNOFROMDISCIPLINA = 0x06\nCOMMAND_LISTADNFFROMAS = 0x07\n\n# Tipo das classes\n\nCLASS_CURSO = 0X10\nCLASS_DISCIPLINA = 0X11\nCLASS_ALUNO = 0X12\nCLASS_MATRICULA = 0X13\nCLASS_MATRICULA_NOTA = 0X14\nCLASS_MATRICULA_FALTAS = 0X15\n","repo_name":"getuliobr/SistemasDistribuidos","sub_path":"RPC/Server/server_utils.py","file_name":"server_utils.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"37930304415","text":"from functools import cache\nfrom Binomial import Binomial\nfrom _tabltypes import set_attributes\n\n\"\"\"Labeled graphs.\n\n[0] 1;\n[1] 0, 1;\n[2] 0, 1, 1;\n[3] 0, 2, 2, 4;\n[4] 0, 8, 6, 12, 38;\n[5] 0, 64, 32, 48, 152, 728;\n[6] 0, 1024, 320, 320, 760, 3640, 26704;\n[7] 0, 32768, 6144, 3840, 6080, 21840, 160224, 1866256;\n\"\"\"\n\n\n@cache\ndef labeledgraphs(n: int) -> list[int]:\n\n if n == 0: return [1]\n\n s = [2 ** (((k - n + 1) * (k - n)) // 2) * Binomial(n - 1, k - 1) * labeledgraphs(k)[k] for k in range(1, n)]\n b = 2 ** (((n - 1) * n) // 2) - sum(s)\n \n return [0] + s + [b]\n\n\n@set_attributes(\n labeledgraphs, \n \"LabeledGraphs\", \n ['A360603'], \n True)\ndef LabeledGraphs(n: int, k: int) -> int: \n return labeledgraphs(n)[k]\n\n\nif __name__ == \"__main__\":\n from _tabltest import TablTest\n\n 
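    # Hypothetical smoke check (not in the original file): print the first\n    # few rows of the triangle; they should match the values quoted in the\n    # module docstring above.\n    for row in range(5):\n        print(labeledgraphs(row))\n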
TablTest(LabeledGraphs)","repo_name":"PeterLuschny/tabl","sub_path":"src/LabeledGraphs.py","file_name":"LabeledGraphs.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"91"} +{"seq_id":"72885418222","text":"import pygame\nimport pygame._sdl2 as pgsdl\n\nfrom ..core.physics import _Physics\nfrom ..core.time import Time\nfrom ..utils import Vectorizable\nfrom .components import Collider\nfrom .scene import Entity\nfrom .transform import Transform\n\n\nclass RigidbodyCallbacks:\n default: \"RigidbodyCallbacks\" = None\n\n def __init__(self, rigidbody):\n self.rigidbody = rigidbody\n\n def on_collision_enter(self, collider: Collider):\n ...\n\n def on_collision_stay(self, collider: Collider):\n ...\n\n def on_collision_exit(self, collider: Collider):\n ...\n\n def on_trigger_enter(self, collider: Collider):\n ...\n\n def on_trigger_stay(self, collider: Collider):\n ...\n\n def on_trigger_exit(self, collider: Collider):\n ...\n\n\nRigidbodyCallbacks.default = RigidbodyCallbacks(None)\n\n\nclass RigidbodyEntity(Entity):\n def __init__(\n self,\n transform: Transform,\n layer_names: list[str],\n texture: pgsdl.Texture = None,\n tags: list[str] = None,\n callbacks_type: type[RigidbodyCallbacks] = None,\n is_static: bool = True,\n gravity_scale: float = 0,\n ):\n self._super_entity = super()\n self._callbacks = (\n callbacks_type(self)\n if callbacks_type is not None\n else RigidbodyCallbacks.default\n )\n self._hystory = {}\n self._colliders: list[Collider] = []\n\n self.acceleration: pygame.Vector2 = pygame.Vector2()\n self.speed: pygame.Vector2 = pygame.Vector2()\n self.is_static: bool = is_static\n self.gravity_scale: float = gravity_scale\n _Physics._register_body(self)\n\n self._super_entity.__init__(transform, layer_names, texture, tags)\n\n def destroy(self):\n _Physics._destroyed_body(self)\n self._super_entity.destroy()\n\n def update(self):\n if not self.is_static:\n for collider in self._colliders:\n collider._update_old()\n self.speed.x += self.acceleration.x * Time.delta_time\n self.transform.position.x += self.speed.x * Time.delta_time\n self._snap_colliders()\n self._collisions(\"h\")\n self.speed.y += (\n self.acceleration.y * Time.delta_time\n + self.gravity_scale * Time.delta_time\n )\n self.transform.position.y += self.speed.y * Time.delta_time\n self._snap_colliders()\n self._collisions(\"v\")\n self.acceleration = pygame.Vector2()\n\n def _collisions(self, direction):\n for collider in self._colliders:\n for body in _Physics._rigidbodies:\n if body is self:\n continue\n for other in body._colliders:\n if not other.id in self._hystory:\n self._hystory[other.id] = {\"h\": False, \"v\": False}\n if other.box.colliderect(collider.box):\n if not collider.is_trigger:\n if direction == \"h\":\n self._resolve_collisions_h(collider, other, body)\n elif direction == \"v\":\n self._resolve_collisions_v(collider, other, body)\n if self._hystory[other.id][direction]:\n self._callbacks.on_collision_stay(\n other\n ) if not collider.is_trigger else self._callbacks.on_trigger_stay(\n other\n )\n body._callbacks.on_collision_stay(\n collider\n ) if not other.is_trigger else body._callbacks.on_trigger_stay(\n collider\n )\n else:\n self._callbacks.on_collision_enter(\n other\n ) if not collider.is_trigger else self._callbacks.on_trigger_enter(\n other\n )\n body._callbacks.on_collision_enter(\n collider\n ) if not other.is_trigger else body._callbacks.on_trigger_enter(\n collider\n )\n 
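                            # Record that this pair is now colliding, so the\n                            # next pass fires the *stay* callbacks instead of\n                            # *enter*.\n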
self._hystory[other.id][direction] = True\n else:\n if self._hystory[other.id][direction]:\n self._callbacks.on_collision_exit(\n other\n ) if not collider.is_trigger else self._callbacks.on_trigger_exit(\n other\n )\n body._callbacks.on_collision_exit(\n collider\n ) if not other.is_trigger else body._callbacks.on_trigger_exit(\n collider\n )\n self._hystory[other.id][direction] = False\n\n def _resolve_collisions_h(\n self, collider: Collider, other: Collider, body: \"RigidbodyEntity\"\n ):\n if body.is_static:\n collider.box.left = (\n other.box.right if self.speed.x < 0 else collider.box.left\n )\n collider.box.right = (\n other.box.left if self.speed.x > 0 else collider.box.right\n )\n else:\n if (\n collider.box.left < other.box.right\n and collider._old_box.left >= other.box.right\n ):\n collider.box.left = other.box.right\n if (\n collider.box.right > other.box.left\n and collider._old_box.right <= other.box.left\n ):\n collider.box.right = other.box.left\n self.acceleration.x = 0\n self.speed.x = 0\n self.transform.position.x = collider.box.centerx - collider.offset.x\n self._snap_colliders()\n\n def _resolve_collisions_v(\n self, collider: Collider, other: Collider, body: \"RigidbodyEntity\"\n ):\n if body.is_static:\n collider.box.top = (\n other.box.bottom if self.speed.y < 0 else collider.box.top\n )\n collider.box.bottom = (\n other.box.top if self.speed.y > 0 else collider.box.bottom\n )\n else:\n if (\n collider.box.top < other.box.bottom\n and collider._old_box.top >= other.box.bottom\n ):\n collider.box.top = other.box.bottom\n if (\n collider.box.bottom > other.box.top\n and collider._old_box.bottom <= other.box.top\n ):\n collider.box.bottom = other.box.top\n self.acceleration.y = 0\n self.speed.y = 0\n self.transform.position.y = collider.box.centery - collider.offset.y\n self._snap_colliders()\n\n def _snap_colliders(self):\n for collider in self._colliders:\n collider.box.center = self.transform.position + collider.offset\n\n def add_collider(\n self,\n size: Vectorizable = None,\n offset: Vectorizable = None,\n is_trigger: bool = False,\n ) -> Collider:\n collider: Collider = self.add_component(Collider, init_component=False)\n collider.setup(size, offset, is_trigger)\n self._colliders.append(collider)\n return collider\n\n def add_acceleration(self, acceleration: Vectorizable):\n self.acceleration += pygame.Vector2(acceleration)\n\n def add_force(self, force: Vectorizable):\n self.acceleration += pygame.Vector2(force) / self.mass\n","repo_name":"Damus666/PGPU","sub_path":"src/pgpu/component/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"23896353833","text":"# For heavy recursion mathematical problem, python is really really bad\nimport sys\nimport os\nsys.setrecursionlimit(10000001);\n\nimport math\nimport filecmp\nimport fileinput\nimport cmath\nimport cmd\nimport operator\nimport collections\n\n\ndef freopen(f, option, stream):\n oldf = open(f, option)\n oldfd = oldf.fileno()\n newfd = stream.fileno()\n os.close(newfd)\n os.dup2(oldfd, newfd)\n\n\n# freopen(\"in.txt\", \"r\", sys.stdin)\n# freopen(\"out.txt\",\"w\",sys.stdout)\n\n# Reading two integers, m and n\nm, n = map(int, input().split())\n\n# printing their summation\nprint(m + n)\n\n# printing them seperately\nprint(m, n)\n\n# printing them along with another string\nprint(\"m =\", m, \" n =\", n)\n# the ',' operator itself prints another space\n# if you don't 
want a space then simply do this\ns = str(\"m=\" + str(m) + \"n=\" + str(n))\nprint(s)\n\n# python print() itself prints a newline, if you want to suppress this newline\nprint(\"Hi \", end='') # end='' suppresses the newline\nprint(\"Hellow!! See ?? No newline made\")\n\n# ****************************************************************************************\n# FORMAT SPECIFIER\n# if you wanted to print the integer using format specifier\nprint(\"%d\" % (m + n), end='')\nprint(\" is the sum of %d %d\" % (m, n)) # The different variables for all the specifiers %(v1,v2,.....,vn)\n\n# now to read m number of inputs of type int\narr = list(map(int, input().split()))\n\nfor i in range(0, n, +1):\n print(arr[i], \"where i=\", i, type(arr[i]))\n # In python the intendation is a major thing\n\n# Declaring a 1 dimensional array of length ln\nln = 10\na = [None] * ln\n\n# Declaring a 2 dimensional array with row=3, col=5\nrow = 3\ncol = 5\nmatrix = [[None for _ in range(col)] for _ in range(row)]\nmatrix[0][0] = 111\nprint(matrix[0][0])\n\n# If you have to read unknown number of integers from a line\narr = list(map(int, input().split()))\narray_size = len(arr)\n\n# If you want to read until EOF\nwhile True:\n try:\n # For one variable input in a line don't use map(type, input())\n # use variable = type(input())\n n = int(input())\n print(n)\n except EOFError:\n break\n\n# If you wanted to have a main function like c++\n# Declare all your methods first\n# then write\n# def main():\n#\t#what main should do\n\n# if __name__ == '__main__':\n#\tmain()\n","repo_name":"ahmed-fahim/CurrentTechStack","sub_path":"Python_Own_Cheat_Sheets/Python-3-Basic-Syntax/IO1.py","file_name":"IO1.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"4904275871","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 13 09:39:29 2020\n\n@author: z0024094\n\"\"\"\n\nfrom sklearn.svm import SVC\nfrom sklearn.datasets import load_digits\ndigits = load_digits()\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.25, random_state=0)\n\nsvmClassifierLinear = SVC(kernel='linear')\n\n# we train the SVC model with a linear kernel\nsvmClassifierLinear.fit(x_train, y_train)\n\ny_predictionsSVMLinear = svmClassifierLinear.predict(x_test)\n\nscoreSVMLinear = svmClassifierLinear.score(x_test, y_predictionsSVMLinear)\nprint(scoreSVMLinear)","repo_name":"YufengJin/Python-machine-learning-in-7-days","sub_path":"Section 3/image_classification_svm2.py","file_name":"image_classification_svm2.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"43200140923","text":"import logging\nimport time\nimport openmldb.dbapi\n\nlog = logging.getLogger(__name__)\n\n\nclass ServerChecker:\n def __init__(self, conf_dict, print_sdk_log):\n self.conf_dict = conf_dict\n self.db_name = \"__test_db_xxx_aaa_diagnostic_tool__\"\n self.table_name = \"__test_table_xxx_aaa_diagnostic_tool__\"\n connect_args = (\n {}\n ) # {'database': self.db_name} the db is not guaranteed to exist\n if not print_sdk_log:\n connect_args[\"zkLogLevel\"] = 0\n connect_args[\"glogLevel\"] = 2\n\n if conf_dict[\"mode\"] == \"cluster\":\n connect_args[\"zk\"] = conf_dict[\"zookeeper\"][\"zk_cluster\"]\n connect_args[\"zkPath\"] = 
conf_dict[\"zookeeper\"][\"zk_root_path\"]\n else:\n connect_args[\"host\"], connect_args[\"port\"] = conf_dict[\"nameserver\"][0][\n \"endpoint\"\n ].split(\":\")\n self.db = openmldb.dbapi.connect(**connect_args)\n self.cursor = self.db.cursor()\n\n def parse_component(self, component_list):\n component_map = {}\n for (endpoint, component, _, status, role) in component_list:\n component_map.setdefault(component, [])\n component_map[component].append((endpoint, status))\n return component_map\n\n def check_status(self, component_map):\n for component, value_list in component_map.items():\n for endpoint, status in value_list:\n if status != \"online\":\n log.warning(f\"{component} endpoint {endpoint} is offline\")\n\n def check_startup(self, component_map):\n for component in [\"nameserver\", \"tablet\", \"taskmanager\"]:\n if self.conf_dict[\"mode\"] != \"cluster\":\n if component == \"taskmanager\":\n continue\n if len(self.conf_dict[component]) > 1:\n log.warning(f\"{component} number is greater than 1\")\n\n for item in self.conf_dict[component]:\n endpoint = item[\"endpoint\"]\n has_found = False\n for cur_endpoint, _ in component_map[component]:\n if endpoint == cur_endpoint:\n has_found = True\n break\n if not has_found:\n log.warning(f\"{component} endpoint {endpoint} has not startup\")\n\n def check_component(self):\n result = self.cursor.execute(\"SHOW COMPONENTS;\").fetchall()\n component_map = self.parse_component(result)\n self.check_status(component_map)\n self.check_startup(component_map)\n\n def is_exist(self, data, name):\n for item in data:\n if item[0] == name:\n return True\n return False\n\n def get_job_status(self, job_id):\n try:\n result = self.cursor.execute(\"SHOW JOB {};\".format(job_id)).fetchall()\n return result[0][2]\n except Exception as e:\n log.warning(e)\n return None\n\n def check_offline_select(self) -> bool:\n if \"taskmanager\" not in self.conf_dict:\n log.info(\"no taskmanager installed. 
skip job test\")\n return True\n self.cursor.execute(\"SET @@execute_mode='offline';\")\n # make sure it's an async job, to get the job id, not the select result\n self.cursor.execute(\"SET @@sync_job=false;\")\n result = self.cursor.execute(\n \"SELECT * FROM {};\".format(self.table_name)\n ).fetchall()\n if len(result) != 1:\n log.warning(f\"result is invalid, expect a job info: {result}\")\n return False\n # see system table JOB_INFO schema, job id idx is 0\n job_id, try_time = result[0][0], 0\n # it's an empty table in offline, shouldn't wait too long for final states\n while try_time < 60:\n time.sleep(2)\n status = self.get_job_status(job_id)\n if status is None:\n return False\n elif status == \"FINISHED\":\n return True\n elif status == \"FAILED\":\n log.warning(f\"job execute failed, check job {job_id}\")\n return False\n try_time += 1\n return False\n\n def run_test_sql(self) -> bool:\n self.check_component()\n self.cursor.execute(\"CREATE DATABASE IF NOT EXISTS {};\".format(self.db_name))\n result = self.cursor.execute(\"SHOW DATABASES;\").fetchall()\n if not self.is_exist(result, self.db_name):\n log.warning(\"create database failed\")\n return False\n self.cursor.execute(\"USE {};\".format(self.db_name)).fetchall()\n # If table exists, recreate it, to avoid online check failed.\n # If we added deployment test later, delete deployments too\n if self.table_name in self.cursor.get_tables(self.db_name):\n log.info(\"table exists, recreate it\")\n self.cursor.execute(f\"drop table {self.table_name}\")\n self.cursor.execute(\n f\"CREATE TABLE {self.table_name} (col1 string, col2 string);\"\n )\n result = self.cursor.execute(\"SHOW TABLES;\").fetchall()\n if not self.is_exist(result, self.table_name):\n log.warning(\"create table failed\")\n return False\n\n flag = True\n # offline check\n if self.conf_dict[\"mode\"] == \"cluster\" and not self.check_offline_select():\n flag = False\n\n # online check\n self.cursor.execute(\"SET @@execute_mode='online';\")\n self.cursor.execute(\n \"INSERT INTO {} VALUES ('aa', 'bb');\".format(self.table_name)\n )\n result = self.cursor.execute(\n \"SELECT * FROM {};\".format(self.table_name)\n ).fetchall()\n if len(result) != 1:\n log.warning(\"check select data failed\")\n flag = False\n\n self.cursor.execute(\"DROP TABLE {};\".format(self.table_name))\n result = self.cursor.execute(\"SHOW TABLES;\").fetchall()\n if self.is_exist(result, self.table_name):\n log.warning(f\"drop table {self.table_name} failed\")\n flag = False\n # precheck to avoid the exception of dropping db with tables\n if self.cursor.get_tables(self.db_name):\n log.warning(f\"{self.db_name} has tables, skip dropping db\")\n else:\n self.cursor.execute(\"DROP DATABASE {};\".format(self.db_name))\n result = self.cursor.execute(\"SHOW DATABASES;\").fetchall()\n if self.is_exist(result, self.db_name):\n log.warning(f\"drop database {self.db_name} failed\")\n flag = False\n return flag\n","repo_name":"v-wx-v/OpenMLDB","sub_path":"python/openmldb_tool/diagnostic_tool/server_checker.py","file_name":"server_checker.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"91"} +{"seq_id":"12833112826","text":"from tele.applets.account_inter_company_rules.tests.common import TestInterCompanyRulesCommon\n\n\nclass TestInterCompanyRulesCommonSOPO(TestInterCompanyRulesCommon):\n\n @classmethod\n def setUpClass(cls):\n super(TestInterCompanyRulesCommonSOPO, cls).setUpClass()\n # Set warehouse on company A\n 
cls.company_a.warehouse_id = cls.env['stock.warehouse'].search([('company_id', '=', cls.company_a.id)])\n\n        # Set warehouse on company B\n        cls.company_b.warehouse_id = cls.env['stock.warehouse'].search([('company_id', '=', cls.company_b.id)])\n\n        cls.res_users_company_a.groups_id += cls.env.ref('sales_team.group_sale_salesman') + cls.env.ref('purchase.group_purchase_user')\n        cls.res_users_company_b.groups_id += cls.env.ref('sales_team.group_sale_salesman') + cls.env.ref('purchase.group_purchase_user')\n","repo_name":"soikat9/tdoo","sub_path":"applets/sale_purchase_inter_company_rules/tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"8500692891","text":"from typing import Dict\nfrom gi.repository import GLib, Gio\n# Light / WorkflowException are assumed to come from lifxlan, which this app wraps;\n# they are used by convert_old_config() below but were not imported anywhere.\nfrom lifxlan import Light, WorkflowException\nimport json\n\ndef get_old_dest_file():\n    \"\"\"\n    Create / find the obsolete file used to store lights.\n    \"\"\"\n\n    data_dir = GLib.get_user_config_dir()\n    dest = GLib.build_filenamev([data_dir, \"lights.json\"])\n    return Gio.File.new_for_path(dest)\n\ndef get_dest_file():\n    \"\"\"\n    Create / find the file used to store lights.\n    \"\"\"\n\n    data_dir = GLib.get_user_config_dir()\n    dest = GLib.build_filenamev([data_dir, \"ambience.json\"])\n    return Gio.File.new_for_path(dest)\n\ndef get_config(dest_file):\n    \"\"\"\n    Loads the config file into a dictionary.\n    \"\"\"\n    try:\n        (success, content, tag) = dest_file.load_contents()\n        return json.loads(content.decode(\"utf-8\"))\n    except GLib.GError as error:\n        # File doesn't exist: create it and return an empty config so callers\n        # always get a dictionary back\n        dest_file.create(Gio.FileCopyFlags.NONE, None)\n        return {\"groups\":[]}\n    except (TypeError, ValueError) as e:\n        # File is most likely empty\n        print(\"Config file empty\")\n        return {\"groups\":[]}\n\ndef remove_light_from_group(config, mac):\n    for group_idx in range(len(config[\"groups\"])):\n        for light in config[\"groups\"][group_idx][\"lights\"]:\n            if light[\"mac\"] == mac:\n                config[\"groups\"][group_idx][\"lights\"].remove(light)\n                break\n        \n        if len(config[\"groups\"][group_idx][\"lights\"]) == 0:\n            config[\"groups\"].pop(group_idx)\n\n    return config\n\ndef add_light_to_group(config, label, light):\n    group_index = -1\n    for group in range(len(config[\"groups\"])):\n        if config[\"groups\"][group][\"label\"] == label:\n            group_index = group \n            break\n\n    if group_index > -1:\n        exists = False\n        for l in config[\"groups\"][group_index][\"lights\"]:\n            if l[\"ip\"] == light[\"ip\"] and l[\"mac\"] == light[\"mac\"]:\n                exists = True\n                break\n\n        if not exists:\n            config[\"groups\"][group_index][\"lights\"].append(light)\n    else:\n        config[\"groups\"].append({\"label\": label, \"lights\": [light]})\n    return config\n\ndef write_config(config, dest_file):\n    permissions = 0o664\n    if GLib.mkdir_with_parents(dest_file.get_parent().get_path(), permissions) == 0:\n        (success, _) = dest_file.replace_contents(str.encode(json.dumps(config)), None, False, Gio.FileCreateFlags.REPLACE_DESTINATION, None)\n\n        if not success:\n            print(\"Unable to save config file\")\n    else:\n        print(\"Unable to create required directory/ies for config file\")\n\ndef convert_old_config():\n    print(\"Converting old config file...\")\n\n    old = get_config(get_old_dest_file())\n    new = get_config(get_dest_file())\n\n    for l in old:\n        light = Light(l[\"mac\"], l[\"ip\"])\n\n        try:\n            group = light.get_group_label()\n            new = add_light_to_group(new, group, l)\n        except WorkflowException:\n            in_new = False\n            for group in new[\"groups\"]:\n                for light in group[\"lights\"]:\n                    if l[\"mac\"] == light[\"mac\"] and 
l[\"ip\"] and light[\"ip\"]:\n in_new = True\n break\n\n if not in_new:\n new = add_light_to_group(new, \"Unknown Group\", l)\n\n write_config(new, get_dest_file())\n\ndef move_old_config():\n data_dir = GLib.get_user_config_dir()\n dest = GLib.build_filenamev([data_dir, \"lights.json.bak\"])\n target = Gio.File.new_for_path(dest)\n get_old_dest_file().move(target, Gio.FileCopyFlags.NONE, None, None, None)","repo_name":"LukaJankovic/Ambience","sub_path":"src/ambience_settings.py","file_name":"ambience_settings.py","file_ext":"py","file_size_in_byte":3496,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"91"} +{"seq_id":"34771846373","text":"\"\"\" exceptions.py\n\nEXCEPTION? Errors detected during program execution.\n\nHOW EXECUTION HAPPENS?\n TRY CLAUSE:\n - Statements in try block are executed, If no exception occurs, except block is skipped.\n - A try statement may have more than one except clause, to specify handlers for\n different exceptions, At most one handler will be executed.\n EXCEPT CLAUSE:\n - If an exception occurs during execution of the try clause, the rest of the try block is skipped\n - if exception's type matches the exception named after the except keyword, that except clause is executed.\n UNHANDLED EXCEPTION:\n - If an exception occurs which does not match the exception named in\n the except clause, it is called unhandled exception and execution stops.\n ELSE CLAUSE(optional):\n - When this is present this must follow all except clauses.\n - Why used? if some code in except block generates error, then else will catch\n\n\nRAISING EXCEPTION?\n - raise statement allows programmer to force a specified exception to occur.\n example:\n a. >> raise NameError('HiThere') #Argument in raise() is the exception to be raised \n Traceback (most recent call last):\n File \"\", line 1, in \n NameError: HiThere \n b. raise ValueError # shorthand for 'raise ValueError()'\n\n\nUSER-DEFINED EXCEPTION?\n- You can create your own exceptions by creating a new exception class.\n- This should be derived from 'Exception' class.\n\ntry:\n ...\nexcept:\n ...\nexcept ZeroDivisionError as arg: #arg is Exception argument\n ...\nexcept (RuntimeError, TypeError, NameError):\n ...\nelse #optional\n ...\nfinally #is executed under all circumstances\n ...\n\nFINALLY:\n - This is executed under all circumstances\n - In real world applications, the finally clause is useful for releasing\n external resources (such as files or network connections), regardless\n of whether the use of the resource was successful.\n - The finally clause runs whether or not the try statement produces an exception\n - If a finally clause includes a return statement, the returned value will\n be the one from the finally clause’s return statement, not the value from\n the try clause’s return statement\n\"\"\"\n\n#1. ZeroDivisionError\ntry:\n print (1/0)\n print(\"Here\") #Will not be printed\nexcept ZeroDivisionError:\n print(\"In exception\")\n\n\n#2. Multiple except statements\ntry:\n f = open ('a.txt')\n s = f.readline()\n i = int(s.strip())\nexcept IOError:\n print(\"IOError\") #O/P=> IPERROR\nexcept ValueError:\n print(\"ValueError\")\nexcept:\n print (\"Generic exception\")\n\n\n#3. else clause\ntry:\n f = open('a.txt')\nexcept OSError:\n print('cannot open')\nelse:\n print(arg, 'has', len(f.readlines()), 'lines')\n f.close()\n\n\n#4. 
exception argument\ndef this_fails():\n    x = 1/0\ntry:\n    this_fails()\nexcept ZeroDivisionError as err:\n    print('run-time:', err)  #run-time: division by zero\n\n\n#5. raise the exception\n\"\"\"\ntry:\n    raise NameError('HiThere')\nexcept NameError:\n    print('Hello!')\n    raise \n\"\"\"\n\"\"\" O/P:\nHello!\nTraceback (most recent call last):\n  File \"exceptions.py\", line 86, in <module>\n    raise NameError('HiThere')\nNameError: HiThere\n\"\"\"\n\n\n#6. User-defined Exceptions\nclass AmitCustomException(Exception):\n    def __init__(self, a):\n        self.a = a\n    def __str__(self):\n        return repr(self.a)\n\ntry:\n    raise(AmitCustomException(3*2))\nexcept AmitCustomException as e:\n    print('Amit Custom Exception',e.a)\n\"\"\" O/P:\nAmit Custom Exception 6\n\"\"\"\n\n\n#7. finally clause\n#finally clause will execute as the last task before the try statement completes.\n#The finally clause runs whether or not the try statement produces an exception\n#If a finally clause includes a return statement, the returned value will be the one from the \n#finally clause’s return statement, not the value from the try clause’s return statement\n#try:\n#    raise KeyboardInterrupt\n#finally:\n#    print('Being stronger')\n\"\"\"\nO/P:\nBeing stronger\nTraceback (most recent call last):\n  File \"exceptions.py\", line 125, in <module>\n    raise KeyboardInterrupt\nKeyboardInterrupt\n\"\"\"\n\n#If the try statement reaches a break, continue or return statement, the finally clause\n#will execute just prior to the break, continue or return statement’s execution.\ndef test():\n    try:\n        return True\n    finally:\n        return False\ntest() #False \n","repo_name":"amitkumar50/Code-examples","sub_path":"Languages/ScriptingLanguages/Python/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"91"} +{"seq_id":"499121192","text":"#4) Program that declares a list and keeps filling it with numbers until a negative number is entered. Then the vector (only the elements actually entered) must be printed.\n\n# Create the list that will store the numbers typed at the keyboard\nnumeros_positivos= []\nnum=0\n\nprint(\"If you want to stop entering numbers, enter a negative number\")\n\n# while loop that appends each entered number to the list\nwhile num >= 0:\n    num= int(input(\"Enter a number:\"))\n\n    if num < 0:\n        break\n    elif num >0:\n        # append num to the list\n        numeros_positivos.append(num)\n# Print the resulting list\nprint(\"The numbers that belong to the list are: {0}\".format(numeros_positivos))\n\n\n\n\n\n","repo_name":"AnaVerduguez/PG-2C2021","sub_path":"Guia6/Ejercicio4.py","file_name":"Ejercicio4.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"} +{"seq_id":"28502352063","text":"# Author Caozy\nfrom datetime import datetime, timedelta\nimport time\n\nfrom info import constants, db\nfrom info.models import User, News, Category\nfrom info.response_code import RET\nfrom info.utils.image_storage import storage\nfrom . 
import admin_blu\nfrom info.utils.common import user_login_data\nfrom flask import render_template, url_for, request, redirect, current_app, session, g, jsonify\n\n\n@admin_blu.route('/login', methods=['GET', 'POST'])\n@user_login_data\ndef admin_login():\n    '''Admin login view'''\n    if request.method == 'GET':\n        return render_template('admin/login.html')\n\n    else:\n        username = request.form.get('username')\n        password = request.form.get('password')\n\n        if not all([username, password]):\n            return render_template('admin/login.html', errmsg='missing parameters')\n        try:\n            user = User.query.filter(User.nick_name == username).first()\n        except Exception as e:\n            current_app.logger.error(e)\n            return render_template('admin/login.html', errmsg='database query error')\n        if not user:\n            return render_template('admin/login.html', errmsg='no such user')\n\n        if not user.check_passowrd(password):\n            return render_template('admin/login.html', errmsg='wrong username or password')\n        if not user.is_admin:\n            return render_template('admin/login.html', errmsg='this user is not allowed to log in')\n\n        session['user_id'] = user.id\n        session['nick_name'] = username\n        session['mobile'] = user.mobile\n        if user.is_admin:\n            session['is_admin'] = True\n        return redirect(url_for('admin.admin_index'))\n\n\n@admin_blu.route('/index')\n@user_login_data\ndef admin_index():\n    '''Admin index page'''\n    user = g.user\n    if not user:\n        return redirect(url_for('admin.admin_login'))\n\n    context = {\n        'user': user.to_dict() if user else None\n    }\n\n    return render_template('admin/index.html', context=context)\n\n\n@admin_blu.route('/user_count')\n@user_login_data\ndef user_count():\n    '''User count statistics page'''\n    # total number of users, excluding the super admin\n    users_count = None\n    try:\n        users_count = User.query.filter(User.is_admin != True).count()\n    except Exception as e:\n        current_app.logger.error(e)\n    # users added this month\n    mon_count = 0\n    now = time.localtime()\n    try:\n        mon_begin = '%d-%02d-01' % (now.tm_year, now.tm_mon)\n        mon_begin_date = datetime.strptime(mon_begin, '%Y-%m-%d')\n        mon_count = User.query.filter(User.is_admin != True, User.create_time > mon_begin_date).count()\n    except Exception as e:\n        current_app.logger.error(e)\n\n    # users added today\n    day_count = 0\n    try:\n        day_begin = '%d-%02d-%02d' % (now.tm_year, now.tm_mon, now.tm_mday)\n        day_begin_date = datetime.strptime(day_begin, '%Y-%m-%d')\n        day_count = User.query.filter(User.is_admin != True, User.create_time > day_begin_date).count()\n    except Exception as e:\n        current_app.logger.error(e)\n\n    # chart data: get today's date at 00:00:00\n    now_date = datetime.strptime(datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d')\n    # empty lists to collect the chart data\n    active_date = []\n    active_count = []\n\n    # append one day at a time, then reverse into chronological order\n    for i in range(0, 31):\n        begin_date = now_date - timedelta(days=i)\n        end_date = now_date - timedelta(days=(i - 1))\n        active_date.append(begin_date.strftime('%Y-%m-%d'))\n        count = 0\n        try:\n            count = User.query.filter(User.is_admin == False, User.last_login >= begin_date,\n                                      User.last_login < end_date).count()\n        except Exception as e:\n            current_app.logger.error(e)\n        active_count.append(count)\n    active_date.reverse()\n    active_count.reverse()\n    data = {\n        'users_count': users_count,\n        'mon_count': mon_count,\n        'day_count': day_count,\n        'active_date': active_date,\n        'active_count': active_count\n\n    }\n    return render_template('admin/user_count.html', data=data)\n\n\n@admin_blu.route('/user_list')\n@user_login_data\ndef user_list():\n    '''User list'''\n    page = request.args.get('page', 1)\n    try:\n        page = int(page)\n    except Exception as e:\n        page = 1\n    user_page = User.query.order_by(User.last_login.desc()).paginate(page=page,\n                                                                     per_page=constants.ADMIN_USER_PAGE_MAX_COUNT)\n    users = 
user_page.items\n    current_page = user_page.page\n    total_page = user_page.pages\n    user_list = []\n    for user in users:\n        user_list.append(user.to_admin_dict())\n\n    data = {\n        'user_list': user_list,\n        'current_page': current_page,\n        'total_page': total_page\n    }\n    return render_template('admin/user_list.html', data=data)\n\n\n@admin_blu.route('/news_review', methods=['GET', 'POST'])\n@user_login_data\ndef news_review():\n    '''News review list'''\n\n    if request.method == 'GET':\n        page = request.args.get('page', 1)\n        keywords = request.args.get(\"keywords\", \"\")\n        try:\n            page = int(page)\n        except Exception as e:\n            page = 1\n\n        filters=[]\n        if keywords:\n            filters.append(News.title.contains(keywords))\n        news_page = News.query.filter(*filters).order_by(News.update_time.desc()).paginate(page=page,\n                                                                                           per_page=constants.ADMIN_NEWS_PAGE_MAX_COUNT)\n        news = news_page.items\n        current_page = news_page.page\n        total_page = news_page.pages\n        new_list = []\n        for new in news:\n            new_list.append(new.to_review_dict())\n        data = {\n            'new_list': new_list,\n            'current_page': current_page,\n            'total_page': total_page\n        }\n        return render_template('admin/news_review.html', data=data)\n\n\n@admin_blu.route('/news_review_detail', methods=['GET', 'POST'])\n@user_login_data\ndef news_review_detail():\n    '''News review detail'''\n    if request.method == 'GET':\n        new_id = request.args.get('new_id')\n        if not new_id:\n            return render_template('admin/news_review.html', errmsg='error')\n        try:\n            new = News.query.get(new_id)\n        except Exception as e:\n            current_app.logger.error(e)\n            return render_template('admin/news_review.html', errmsg='database error')\n        data = {'new': new.to_dict()}\n        return render_template('admin/news_review_detail.html', data=data)\n    else:\n        new_id = request.json.get('news_id')\n        action = request.json.get('action')\n\n        if not all([new_id, action]):\n            return jsonify(errno=RET.PARAMERR, errmsg='incomplete parameters')\n        if action not in ('accept', 'reject'):\n            return jsonify(errno=RET.PARAMERR, errmsg='invalid parameters')\n        try:\n            new = News.query.get(new_id)\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg='database error')\n        if not new:\n            return jsonify(errno=RET.NODATA, errmsg='no such data')\n        if action == 'accept':\n            new.status = 0\n        else:\n            reason = request.json.get('reason')\n            if not reason:\n                return jsonify(errno=RET.PARAMERR, errmsg='incomplete parameters')\n            new.status = -1\n            new.reason = reason\n        try:\n            db.session.commit()\n        except Exception as e:\n            current_app.logger.error(e)\n            db.session.rollback()\n            return jsonify(errno=RET.DBERR, errmsg='database error')\n        return jsonify(errno=RET.OK, errmsg=\"operation succeeded\")\n\n\n@admin_blu.route('/news_edit')\n@user_login_data\ndef news_edit():\n    '''News edit list'''\n    try:\n        page = request.args.get('page', 1)\n        page = int(page)\n    except Exception as e:\n        page = 1\n    keywords=request.args.get('keywords','')\n    filters=[]\n    if keywords:\n        filters.append(News.title.contains(keywords))\n    new_page = News.query.filter(*filters).order_by(News.update_time.desc()).paginate(page=page,\n                                                                                      per_page=constants.ADMIN_NEWS_PAGE_MAX_COUNT)\n    news = new_page.items\n    current_page = new_page.page\n    total_page = new_page.pages\n\n    new_list = []\n    for new in news:\n        new_list.append(new.to_review_dict())\n\n    data = {\n        'new_list': new_list,\n        'current_page': current_page,\n        'total_page': total_page\n    }\n\n    return render_template('admin/news_edit.html', data=data)\n\n\n@admin_blu.route('/news_edit_detail', methods=['GET', 'POST'])\n@user_login_data\ndef news_edit_detail():\n    '''News edit detail'''\n    if request.method == 'GET':\n        new_id = request.args.get('new_id')\n        if not new_id:\n            return 
render_template('admin/news_edit_detail.html', errmsg='invalid parameters')\n        try:\n            new = News.query.get(new_id)\n        except Exception as e:\n            current_app.logger.error(e)\n            return render_template('admin/news_edit_detail.html', errmsg='database query error')\n        filters=[Category.id!=1]\n        categorys=Category.query.filter(*filters).all()\n        category_list=[]\n        for category in categorys:\n            category_dict=category.to_dict()\n            category_dict['is_selected']=False\n            if category.id==new.category_id:\n                category_dict['is_selected']=True\n            category_list.append(category_dict)\n\n        data = {'new_info': new.to_dict(),\n                'categorys':category_list\n                }\n\n        return render_template('admin/news_edit_detail.html', data=data)\n\n\n    else:\n        new_id=request.form.get('new_id')\n        title=request.form.get('title')\n        category=request.form.get('category')\n        digest=request.form.get('digest')\n        new_image=request.files.get('new_image')\n        content=request.form.get('content')\n        if not all([new_id,title,category,digest,new_image,content]):\n            return jsonify(errno=RET.PARAMERR,errmsg='incomplete parameters')\n        try:\n            new=News.query.get(new_id)\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR,errmsg='database error')\n        if not new:\n            return jsonify(errno=RET.NODATA,errmsg='no such data')\n        try:\n            data=new_image.read()\n            path=storage(data)\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DATAERR,errmsg='failed to read image data')\n        new.title = title\n        new.source = '个人'  # stored value meaning \"personal\"; kept as-is because other code may match it\n        new.category_id = category\n        new.digest = digest\n        new.index_image_url = constants.QINIU_DOMIN_PREFIX + path\n        new.content = content\n        # new.user_id = g.user.id\n        new.status = 1  # 0 approved / 1 pending review / -1 rejected\n\n        try:\n            db.session.commit()\n        except Exception as e:\n            current_app.logger.error(e)\n            db.session.rollback()\n            return jsonify(errno=RET.DBERR,errmsg='failed to save data')\n        return jsonify(errno=RET.OK, errmsg=\"edit succeeded\")\n\n\n@admin_blu.route('/news_type',methods=['GET','POST'])\n@user_login_data\ndef news_type():\n    '''News category management'''\n    if request.method=='GET':\n        try:\n            categorys=Category.query.filter(Category.id!=1).all()\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR,errmsg='database error')\n        category_list=[]\n        for category in categorys:\n            category_list.append(category.to_dict())\n        # category_list.pop(0)\n        data={\n            'categorys':category_list\n        }\n\n        return render_template('admin/news_type.html',data=data)\n    else:\n        # edit a category\n        category_id=request.json.get('id')\n        category_name=request.json.get('name')\n        if not category_id:\n            # no id: create a new category instead\n            category=Category()\n            category.name=category_name\n            try:\n                db.session.add(category)\n                db.session.commit()\n            except Exception as e:\n                current_app.logger.error(e)\n                db.session.rollback()\n                return jsonify(errno=RET.DBERR,errmsg='database error')\n            return jsonify(errno=RET.OK,errmsg='OK')\n        if not category_name:\n            return jsonify(errno=RET.PARAMERR,errmsg='invalid parameters')\n        try:\n            category =Category.query.get(category_id)\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR,errmsg='database error')\n        if not category:\n            return jsonify(errno=RET.NODATA,errmsg='no data')\n        # update the existing category\n        category.name=category_name\n        try:\n            db.session.commit()\n        except Exception as e:\n            current_app.logger.error(e)\n            db.session.rollback()\n            return jsonify(errno=RET.DBERR,errmsg='database error')\n        return jsonify(errno=RET.OK, errmsg='OK')","repo_name":"returnes/information","sub_path":"info/modules/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"91"}
+{"seq_id":"37971227433","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#Functions list that now acutally wrap. The third value are the return\n#type, if it exists, or if I'm able to translate from C code :)\nimport constants as CO\nimport ctypes as C\n\nFUNCTION_LIST = ( \n \n #General funtions\n ('FreeImage_Initialise', '@4'), \n ('FreeImage_DeInitialise', '@0'),\n ('FreeImage_GetVersion', '@0', None, C.c_char_p), \n ('FreeImage_GetCopyrightMessage', '@0', None, C.c_char_p), \n ('FreeImage_SetOutputMessage', '@4'),\n \n #Bitmap management functions\n ('FreeImage_Allocate', '@24', CO.COL_1TO32),\n ('FreeImage_AllocateT', '@28'),\n ('FreeImage_Load', '@12'),\n ('FreeImage_LoadU', '@12'),\n ('FreeImage_LoadFromHandle','@16'),\n ('FreeImage_Save', '@16'),\n ('FreeImage_SaveU', '@16'),\n ('FreeImage_SaveToHandle', '@20'),\n ('FreeImage_Clone', '@4'),\n ('FreeImage_Unload', '@4'),\n \n #Bitmap information\n ('FreeImage_GetImageType', '@4'),\n ('FreeImage_GetColorsUsed', '@4', CO.COL_1TO32 ),\n ('FreeImage_GetBPP', '@4'),\n ('FreeImage_GetWidth', '@4'),\n ('FreeImage_GetHeight', '@4'),\n ('FreeImage_GetLine', '@4'),\n ('FreeImage_GetPitch', '@4'),\n ('FreeImage_GetDIBSize', '@4'),\n ('FreeImage_GetPalette', '@4', CO.COL_1TO32, \n C.POINTER(CO.RGBQUAD) ),\n ('FreeImage_GetDotsPerMeterX', '@4'),\n ('FreeImage_GetDotsPerMeterY', '@4'),\n ('FreeImage_SetDotsPerMeterX', '@8'), \n ('FreeImage_SetDotsPerMeterY', '@8'),\n ('FreeImage_GetInfoHeader', '@4', CO.COL_1TO32,\n C.POINTER(CO.PBITMAPINFOHEADER)),\n ('FreeImage_GetColorType', '@4', CO.COL_1TO32 ),\n ('FreeImage_GetRedMask', '@4', CO.COL_1TO32 ),\n ('FreeImage_GetGreenMask', '@4', CO.COL_1TO32 ),\n ('FreeImage_GetBlueMask', '@4', CO.COL_1TO32 ),\n ('FreeImage_GetTransparencyCount', '@4', CO.COL_1TO32 ),\n ('FreeImage_GetTransparencyTable', '@4', (CO.COL_8,), C.POINTER(CO.BYTE)),\n ('FreeImage_SetTransparencyTable', '@12', (CO.COL_8,) ),\n ('FreeImage_SetTransparent', '@8', (CO.COL_8, CO.COL_32) ),\n ('FreeImage_IsTransparent', '@4', CO.COL_1TO32 ),\n ('FreeImage_HasBackgroundColor', '@4', (CO.COL_8, CO.COL_24, CO.COL_32) ),\n ('FreeImage_GetBackgroundColor', '@8', (CO.COL_8, CO.COL_24, CO.COL_32),\n C.POINTER(CO.RGBQUAD) ),\n ('FreeImage_SetBackgroundColor', '@8', (CO.COL_8, CO.COL_24, CO.COL_32) ),\n \n #Filetype functions\n ('FreeImage_GetFileType', '@8'), \n ('FreeImage_GetFileTypeU', '@8'),\n ('FreeImage_GetFileTypeFromHandle', '@12'), \n \n \n #Pixel access\n ('FreeImage_GetBits', '@4', None, C.POINTER(CO.BYTE)),\n ('FreeImage_GetScanLine', '@8', None, C.POINTER(CO.BYTE)),\n ('FreeImage_GetPixelIndex', '@16', CO.COL_1TO8 ),\n ('FreeImage_SetPixelIndex', '@16', CO.COL_1TO8 ),\n ('FreeImage_GetPixelColor', '@16', CO.COL_16TO32 ),\n ('FreeImage_SetPixelColor', '@16', CO.COL_16TO32 ),\n\n #Conversion / Trasformation\n ('FreeImage_ConvertTo4Bits', '@4', CO.COL_1TO32),\n ('FreeImage_ConvertTo8Bits', '@4', CO.COL_1TO32),\n ('FreeImage_ConvertToGreyscale', '@4', CO.COL_1TO32),\n ('FreeImage_ConvertTo16Bits555', '@4', CO.COL_1TO32),\n ('FreeImage_ConvertTo16Bits565', '@4', CO.COL_1TO32),\n ('FreeImage_ConvertTo24Bits', '@4', CO.COL_1TO48),\n ('FreeImage_ConvertTo32Bits', '@4', CO.COL_1TO32),\n ('FreeImage_ColorQuantize', '@8', (CO.COL_24,)),\n ('FreeImage_ColorQuantizeEx', '@20', (CO.COL_24,)),\n ('FreeImage_Threshold', '@8', CO.COL_1TO32),\n ('FreeImage_Dither', '@8', CO.COL_1TO32),\n ('FreeImage_ConvertFromRawBits', '@36', CO.COL_1TO32),\n ('FreeImage_ConvertToRawBits', '@32', CO.COL_1TO32),\n ('FreeImage_ConvertToStandardType', '@8'),\n 
('FreeImage_ConvertToType', '@12'),\n ('FreeImage_ConvertToRGBF', '@4', (CO.COL_24, CO.COL_32,)),\n \n #Copy / Paste / Composite routines\n ('FreeImage_Copy', '@20'),\n ('FreeImage_Paste', '@20', CO.COL_1TO32),\n \n #Plugin\n ('FreeImage_GetFIFCount', '@0'),\n ('FreeImage_SetPluginEnabled', '@8'),\n ('FreeImage_FIFSupportsReading', '@4'), \n ('FreeImage_GetFIFFromFilename', '@4'),\n ('FreeImage_GetFIFFromFilenameU', '@4'),\n ('FreeImage_FIFSupportsExportBPP', '@8'),\n ('FreeImage_FIFSupportsExportType', '@8'),\n ('FreeImage_FIFSupportsICCProfiles', '@4'),\n ('FreeImage_FIFSupportsWriting', '@4'),\n ('FreeImage_IsPluginEnabled', '@4'),\n ('FreeImage_RegisterLocalPlugin', '@20'), \n ('FreeImage_GetFIFDescription', '@4', None, C.c_char_p),\n ('FreeImage_GetFIFExtensionList', '@4', None, C.c_char_p),\n ('FreeImage_GetFIFFromFormat', '@4', None, C.c_char_p),\n ('FreeImage_GetFIFFromMime', '@4', None, C.c_char_p),\n ('FreeImage_GetFIFMimeType', '@4', None, C.c_char_p),\n ('FreeImage_GetFIFRegExpr', '@4', None, C.c_char_p),\n ('FreeImage_GetFormatFromFIF', '@4', None, C.c_char_p),\n \n #Upsampling / downsampling\n ('FreeImage_Rescale', '@16', CO.COL_1TO32 ),\n ('FreeImage_MakeThumbnail', '@12', CO.COL_1TO32 ),\n \n #Rotation and flipping\n ('FreeImage_RotateClassic', '@12', CO.COL_1TO32),\n ('FreeImage_RotateEx', '@48', (CO.COL_8, CO.COL_24, CO.COL_32), ),\n\n \n #Color manipulation\n ('FreeImage_AdjustBrightness', '@12', (CO.COL_8, CO.COL_24, CO.COL_32), CO.BOOL),\n ('FreeImage_AdjustCurve', '@12', (CO.COL_8, CO.COL_24, CO.COL_32), CO.BOOL),\n ('FreeImage_AdjustGamma', '@12', (CO.COL_8, CO.COL_24, CO.COL_32), CO.BOOL),\n ('FreeImage_AdjustContrast', '@12', (CO.COL_8, CO.COL_24, CO.COL_32), CO.BOOL),\n ('FreeImage_GetHistogram', '@12', (CO.COL_8, CO.COL_24, CO.COL_32), CO.BOOL),\n ('FreeImage_Invert', '@4', CO.COL_1TO32, CO.BOOL), \n ('FreeImage_GetChannel', '@8', (CO.COL_24, CO.COL_32)),\n ('FreeImage_SetChannel', '@12', (CO.COL_24, CO.COL_32)),\n ('FreeImage_GetComplexChannel', '@8'),\n ('FreeImage_SetComplexChannel', '@12'),\n \n #Multipage\n ('FreeImage_OpenMultiBitmap', '@24'), \n ('FreeImage_AppendPage', '@8'), \n ('FreeImage_CloseMultiBitmap', '@8'), \n ('FreeImage_GetPageCount', '@4'),\n ('FreeImage_LockPage', '@8'), \n ('FreeImage_UnlockPage', '@12'),\n ('FreeImage_InsertPage', '@12'),\n ('FreeImage_DeletePage', '@8'),\n ('FreeImage_MovePage', '@12'),\n ('FreeImage_GetLockedPageNumbers', '@12'),\n \n #Tag\n ('FreeImage_GetTagValue', '@4'), \n ('FreeImage_GetTagDescription', '@4', None, C.c_char_p), \n ('FreeImage_TagToString', '@12', None, C.c_char_p),\n ('FreeImage_GetTagCount', '@4', None, CO.DWORD),\n ('FreeImage_GetTagKey', '@4', None, C.c_char_p),\n ('FreeImage_GetTagID', '@4', None, C.c_char_p),\n ('FreeImage_GetTagType', '@4'),\n \n \n #Metadata\n ('FreeImage_GetMetadata', '@16'), \n ('FreeImage_GetMetadataCount', '@8', None, CO.DWORD),\n ('FreeImage_FindFirstMetadata', '@12', None, CO.VOID),\n ('FreeImage_FindNextMetadata', '@8', None, CO.VOID),\n ('FreeImage_FindCloseMetadata', '@4'),\n \n ('FreeImage_IsLittleEndian', '@0')\n \n # --------------- This functions don't work yet :(\n \n #All handle functions...\n \n)\n","repo_name":"glennpierce/Fia","sub_path":"Wrappers/FreeImagePy/funct_list.py","file_name":"funct_list.py","file_ext":"py","file_size_in_byte":7823,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"89"} +{"seq_id":"39263444171","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom 
torch.distributions import Normal\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nclass ValueNetwork(nn.Module):\n    def __init__(self, state_size,seed=0):\n        super(ValueNetwork, self).__init__()\n        torch.manual_seed(seed)\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n        torch.backends.cudnn.deterministic = True\n        torch.backends.cudnn.benchmark = False\n        self.hidden1 = nn.Linear(state_size, 256)\n        self.hidden2 = nn.Linear(256,256)\n        self.value = nn.Linear(256, 1)\n\n    def forward(self, states):\n        x = F.relu(self.hidden1(states))\n        x = F.relu(self.hidden2(x))\n        return self.value(x)\n\nclass Critic(nn.Module):\n    def __init__(self, state_size, action_size, seed=0):\n        super(Critic, self).__init__()\n        torch.manual_seed(seed)\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n        torch.backends.cudnn.deterministic = True\n        torch.backends.cudnn.benchmark = False\n        self.fc1 = nn.Linear(state_size+action_size, 256)\n        self.fc2 = nn.Linear(256, 256)\n        self.fc3 = nn.Linear(256, 1)\n\n    def forward(self, state, action):\n        x = torch.cat((state, action), dim=1)\n        x = F.relu(self.fc1(x))\n        x = F.relu(self.fc2(x))\n        return self.fc3(x)\n\nclass Actor(nn.Module):\n    def __init__(self, state_size, action_size, seed=0, log_std_min=-20, log_std_max=2):\n        super(Actor, self).__init__()\n        torch.manual_seed(seed)\n        torch.cuda.manual_seed(seed)\n        torch.cuda.manual_seed_all(seed)\n        torch.backends.cudnn.deterministic = True\n        torch.backends.cudnn.benchmark = False\n        self.log_std_min = log_std_min\n        self.log_std_max = log_std_max\n\n        self.fc1 = nn.Linear(state_size, 256)\n        self.fc2 = nn.Linear(256, 256)\n\n        self.mu = nn.Linear(256, action_size)\n        self.log_std = nn.Linear(256, action_size)\n\n    def forward(self, x):\n        x = F.relu(self.fc1(x), inplace=True)\n        x = F.relu(self.fc2(x), inplace=True)\n        mu = self.mu(x)\n        log_std = self.log_std(x)\n        log_std = torch.clamp(log_std, self.log_std_min, self.log_std_max)\n        std = log_std.exp()\n        return mu, std\n\n    def evaluate(self, state):\n        mu, std = self.forward(state)\n        # print(mu.size())\n        distribution = Normal(0, 1)\n        epsilon = distribution.sample().to(device)\n        action = torch.tanh(mu+epsilon*std)\n        log_prob = Normal(mu, std).log_prob(mu + epsilon * std) - torch.log(1 - action.pow(2) + 1e-6)\n        log_prob = log_prob.sum(-1,keepdim=True)\n        return action, log_prob\n    \n    def get_action(self,state):\n        # forward() already returns std = log_std.exp(), so exponentiating again here was a bug\n        mu, std = self.forward(state)\n        distribution = Normal(0,1)\n        epsilon = distribution.sample().to(device)\n        action = torch.tanh(mu+epsilon*std).cpu()\n        return action\n    \n    def deterministic_action(self,state):\n        mu, _ = self.forward(state)\n        action = torch.tanh(mu).cpu()\n        return action","repo_name":"Bumgeun96/SAC_algorithm","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"33789657317","text":"from functools import lru_cache\n\n\nclass Solution:\n    # backtrack\n    # tc = O(n^n), sc = O(n)\n    def canJump1(self, nums):\n        N = len(nums)\n\n        def backtrack(ind):\n            if ind == N - 1:\n                return True\n            if nums[ind] == 0:\n                return False\n            max_jump = ind + nums[ind]\n            for jump in range(ind + 1, max_jump + 1):\n                if jump < N and backtrack(jump):\n                    return True\n            return False\n\n        return backtrack(0)\n\n    # dp top down\n    # tc = O(n*n), sc = O(n)+O(n) stack with dp\n    def canJump2(self, nums):\n        N = len(nums)\n        dp = [-1] * N\n\n        def backtrack(ind):\n            if ind == N - 1:\n                return True\n            
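# a value of 0 here is a dead end: no jump can leave this index\n            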
if nums[ind] == 0:\n                return False\n            if dp[ind] != -1:\n                return dp[ind]\n            max_jump = ind + nums[ind]\n            for jump in range(ind + 1, max_jump + 1):\n                if jump < N and backtrack(jump):\n                    dp[ind] = True\n                    return dp[ind]\n            dp[ind] = False\n            return dp[ind]\n\n        return backtrack(0)\n\n    def canJump3(self, nums):\n        n = len(nums)\n\n        @lru_cache(None)\n        def dp(i):\n            if i == n - 1:\n                return True\n\n            for j in range(i + 1, min(i + nums[i], n - 1) + 1):\n                if dp(j):\n                    return True\n            return False\n\n        return dp(0)\n\n    # dp tabulation\n    # tc O(n^2), sc O(n)\n    def canJump4(self, nums):\n        n = len(nums)\n        dp = [False] * n\n        dp[n - 1] = True\n        for i in range(n - 2, -1, -1):\n            # min(n, i+nums[i]+1): if we can't reach the next pos,\n            # the inner for loop will not execute and the dp of\n            # that pos will remain False, so all pos\n            # prior to it will remain False too\n            for j in range(i + 1, min(n, i + nums[i] + 1)):\n                if dp[j]:\n                    dp[i] = True\n                    break\n        return dp[0]\n\n    # greedy\n    # tc O(n), sc O(1)\n    def canJump5(self, nums):\n        goal = len(nums) - 1\n        for i in range(len(nums) - 2, -1, -1):\n            if i + nums[i] >= goal:\n                goal = i\n\n        if goal == 0:\n            return True\n        else:\n            return False\n","repo_name":"keerthanmp21/leetcode","sub_path":"dp/jump_game.py","file_name":"jump_game.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"26883889404","text":"# Dongseok Seo, Dohyeon Kim\n\nimport random\nimport operator\nimport numpy as np\n\nclass Card:\n    def __init__(self, kind, number):\n        self.kind = kind\n        self.number = number\n    def __str__(self):\n        return '{0}:{1}'.format(self.kind, self.number)\n\nclass Player:\n    def __init__(self):\n        self.cards = []\n\n    def printCards(self):\n        for card in self.cards:\n            print(card.kind + \" \" + str(card.number))\n\nclass Rummy:\n    def __init__(self, playerCnt, distCardCnt):\n        self.distCardCnt = distCardCnt\n        self.playerCnt = playerCnt\n        self.cards = []\n        self.players = []\n        self.generateCards()\n        self.shuffleCards()\n        self.createPlayers()\n        self.remainder = []\n        self.temp_cards = []\n        # self.field = np.zeros((4, 13), dtype = int)\n        self.field = np.full((4, 13), -1, dtype = int)\n        self.field2 = np.zeros((4, 13), dtype = int)\n        # self.field2 = [[0,0,0,0,5,0,0,0,0,0,0,0,0],[0,0,3,0,5,0,0,0,0,0,0,0,0],[0,0,3,0,5,0,0,0,0,0,0,0,0],[0,0,3,0,0,0,0,0,0,0,0,0,0]]\n\n\n    def generateCards(self):\n        self.cards = []\n        kinds = ['spade', 'heart', 'diamond', 'clover']\n        for i in range(4):\n            for j in range(13):\n                card = Card(kinds[i], j + 1)\n                self.cards.append(card)\n        return\n\n    def shuffleCards(self):\n        random.shuffle(self.cards)\n\n    def createPlayers(self):\n        for j in range(self.playerCnt):\n            player = Player()\n            self.players.append(player)\n\n    def printCards(self):\n        for card in self.cards:\n            print(card.kind + \"\" + str(card.number))\n\n    def playCards(self):\n        for i in range(self.distCardCnt):\n            for j in range(self.playerCnt):\n                card = self.cards.pop()\n                self.players[j].cards.append(card)\n\n        for k in range(self.playerCnt):\n            self.players[k].cards = sorted(self.players[k].cards, key=operator.attrgetter('kind', 'number'))\n\n    def printPlayerCards(self):\n        player_num = 1\n        for player in self.players:\n            print(\"player\", player_num, \":\\n\")\n            player.printCards()\n            player_num += 1\n\n    def putCards(self):\n        self.remainder = []\n        player_cnt = 0\n\n        for i in range(len(self.players)):\n            if len(self.players[i].cards) == 0:\n                return False\n        if len(self.cards) == 0:\n            return False\n\n        for player in self.players:\n            player_cnt 
+= 1\n if player_cnt == 5:\n player_cnt = 1\n print(\"player:\", player_cnt)\n #ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ1. put the sequence of numbersㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#\n getter = 0\n self.temp_cards = []\n for card in player.cards:\n self.temp_cards.append((card.kind, card.number))\n # self.temp_cards = [('clover',3), ('spade',5)]\n # self.temp_cards = [('spade', 5)]\n prev_card_type = None\n prev_card_number = None\n cnt = 0\n cnt2 = 0\n end_idx = 0\n end_idx2 = 0\n for i, (curr_card_t, curr_card_n) in enumerate(self.temp_cards):\n if prev_card_type == curr_card_t and prev_card_number + 1 == curr_card_n:\n cnt += 1\n end_idx = i\n if cnt >= 2:\n getter = 1\n break\n if prev_card_type == curr_card_t and prev_card_number + 1 != curr_card_n:\n cnt = 0\n if prev_card_type != curr_card_t:\n cnt = 0\n if prev_card_type != curr_card_t:\n cnt = 0\n prev_card_type = curr_card_t\n prev_card_number = curr_card_n\n\n start_idx = end_idx - cnt\n # print(\"sta, end : \", start_idx, end_idx)\n if end_idx - start_idx >= 2:\n self.remainder = self.temp_cards[start_idx:end_idx + 1]\n del self.temp_cards[start_idx:end_idx + 1]\n for i in range(len(self.remainder)):\n item_card = self.remainder.pop()\n if item_card[0] == 'clover':\n self.field[0][item_card[1]-1] = item_card[1]\n if item_card[0] == 'diamond':\n self.field[1][item_card[1]-1] = item_card[1]\n if item_card[0] == 'heart':\n self.field[2][item_card[1]-1] = item_card[1]\n if item_card[0] == 'spade':\n self.field[3][item_card[1]-1] = item_card[1]\n print('=' * 50)\n #ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ2. put the same numberㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#\n self.temp_cards = sorted(self.temp_cards, key=lambda temp_card: temp_card[1])\n prev_card_number = None\n for i, (curr_card_t, curr_card_n) in enumerate(self.temp_cards):\n if prev_card_number == curr_card_n:\n cnt2 += 1\n if cnt2>=2:\n end_idx2 = i\n elif (cnt2 >= 2 and prev_card_number != curr_card_n):\n end_idx2 = i-1\n getter = 1\n break\n else :\n cnt2 = 0\n\n prev_card_number = curr_card_n\n\n start_idx2 = end_idx2 - cnt2\n if end_idx2 - start_idx2 >= 2:\n self.remainder = self.temp_cards[start_idx2:end_idx2 + 1]\n del self.temp_cards[start_idx2:end_idx2 + 1]\n for i in range(len(self.remainder)):\n item_card = self.remainder.pop()\n if item_card[0] == 'clover':\n self.field2[0][item_card[1] - 1] = item_card[1]\n if item_card[0] == 'diamond':\n self.field2[1][item_card[1] - 1] = item_card[1]\n if item_card[0] == 'heart':\n self.field2[2][item_card[1] - 1] = item_card[1]\n if item_card[0] == 'spade':\n self.field2[3][item_card[1] - 1] = item_card[1]\n print('=' * 50)\n\n # ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ3. 
attach to sequence of numbersㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#\n if(getter == 0):\n for card in self.temp_cards:\n for i in range(0,2):\n if card[0] == 'clover':\n for j in range(0,12):\n if self.field[0][j] == card[1] - 1:\n self.field[0][j+1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n elif self.field[0][j] == card[1] + 1 :\n self.field[0][j-1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n if card[0] == 'diamond':\n for j in range(0,12):\n if self.field[1][j] == card[1] - 1:\n self.field[1][j+1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n elif self.field[1][j] == card[1] + 1:\n self.field[1][j-1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n\n\n if card[0] == 'heart':\n for j in range(0,12):\n if self.field[2][j] == card[1] - 1:\n self.field[2][j+1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n elif self.field[2][j] == card[1] + 1:\n self.field[2][j-1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n\n if card[0] == 'spade':\n for j in range(0,12):\n if self.field[3][j] == card[1] - 1:\n self.field[3][j+1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n elif self.field[3][j] == card[1] + 1:\n self.field[3][j-1] = card[1]\n try:\n idx = self.temp_cards.index(card)\n self.temp_cards.pop(idx)\n getter += 1\n except:\n continue\n\n # ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ4. attach to same numberㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#\n\n # print(self.temp_cards[0][1])\n idx_list = []\n if (getter == 0): \n for card in range(len(self.temp_cards)):\n for i in range(13):\n same_col_cnt = 0\n same_num_cnt = 0\n for j in range(4):\n if self.field2[j][i] != 0:\n same_num_cnt += 1\n same_col_cnt += j\n if same_num_cnt == 3 and i+1==self.temp_cards[card][1]:\n getter += 1\n # print('1')\n self.field2[6-same_col_cnt][i]=self.temp_cards[card][1]\n idx_list.append(card)\n print(idx_list)\n idx_list.sort(reverse=True)\n for item in idx_list:\n self.temp_cards.pop(item)\n\n #ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ#\n if (getter == 0 and len(self.cards) != 0):\n a = self.cards.pop()\n player.cards.append(a)\n self.temp_cards.append((a.kind,a.number))\n if a.kind == 'clover':\n a.kind = '♣'\n if a.kind == 'diamond':\n a.kind = '♦'\n if a.kind == 'heart':\n a.kind = '♥'\n if a.kind == 'spade':\n a.kind = '♠'\n print(\"player \"+ str(player_cnt) + \" got \" + str( (a.kind, a.number) ) )\n player.cards = sorted(player.cards, key=operator.attrgetter('kind', 'number'))\n\n\n if getter != 0:\n for i in range(len(player.cards)):\n if i in range(len(self.temp_cards)):\n player.cards[i] = Card(self.temp_cards[i][0], self.temp_cards[i][1])\n else :\n player.cards.pop()\n\n # self.temp_cards.sort(key=lambda x: x[0])\n self.temp_cards.sort()\n\n for idx, card in enumerate(self.temp_cards):\n if card[0] == 'clover':\n self.temp_cards[idx] = ('♣', card[1])\n if card[0] == 'diamond':\n self.temp_cards[idx] = ('♦', card[1])\n if card[0] == 'heart':\n self.temp_cards[idx] = ('♥', card[1])\n if card[0] == 'spade':\n self.temp_cards[idx] = ('♠', card[1])\n\n\n print(f'remain : {self.field}, \\n{self.field2} \\ntemp_cards: {self.temp_cards}')\n print(\"deck left : \" + str(len(self.cards)))\n 
print(\"ㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡㅡ\")\n\n if len(self.cards) == 0 or len(self.temp_cards)==0:\n self.endGame(player_cnt)\n print(\"end game\")\n return False\n\n def endGame(self, player):\n player_len = []\n for i in range(len(self.players)):\n player_len.append(len(self.players[i].cards))\n print(\"player \" + str(i + 1) + \" left cards : \" + str(player_len[i]))\n\n winner = []\n\n for i in range(len(self.players)):\n if len(self.players[i].cards) == min(player_len):\n winner.append(self.players[i])\n\n a = min(player_len)\n\n if (len(self.cards) != 0):\n print(f'')\n elif len(winner)== 1:\n print(\"\")\n else :\n print(\"draw : \")\n for j, v in enumerate(player_len):\n if v == a:\n print('player '+str(j+1))\n return False\n\nplayerCnt = 4\ndistCardCnt = 7\ngameCount = 100\nrummy = Rummy(playerCnt, distCardCnt)\n# rummy.printCards()\nrummy.playCards()\nfor i in range(gameCount):\n if rummy.putCards() != 0:\n rummy.putCards()\n if rummy.putCards() == 0:\n break\n","repo_name":"Dohy2703/Seoultech","sub_path":"PythonAlgorithm/FinalProject/Rummy.py","file_name":"Rummy.py","file_ext":"py","file_size_in_byte":14944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"23429415529","text":"#Python 3 defines 63 built-in exceptions\n'''\n BaseException\n ↑\n Exception\n ↑\n LookupError ArithmeticError\n ↑\n ZeroDivisionError\n'''\n'''try:\n x = int(input(\"Enter value \\t: \",))\n answer = 100/x\n print(answer)\nexcept ZeroDivisionError:\n print('Cannot divide by zero')\n\nexcept ValueError:\n print('Must be a number')\n\nexcept:\n print('Something went wrong')'''\n\n\n\ntry:\n answer = 100/0\n \nexcept ZeroDivisionError:\n print('Zeror Division Eroor')\nexcept ArithmeticError:\n print('Arithmentic Error')\n \n\ndef badFunc(x):\n try:\n return x/'HJJH'\n except:\n print('I did it again')\n raise #we are raising an error because so that we handle it when the function is called\n\n\ntry:\n print(badFunc(100))\nexcept ZeroDivisionError:\n print('I see')\nexcept ValueError:\n print('Value error')\nexcept:\n print('Bad data')\n\nprint('END!!')\n\nprint('***********************************\\n\\n\\n')\ndef readint(prompt, min, max):\n x = int(input(prompt))\n try:\n assert (x >= min and x<=max)\n except AssertionError:\n print('the value is not within permitted range (-10..10)')\n \n except ValueError:\n print('wrong input')\n#\n# put your code here\n#\n\nv = readint(\"Enter a number from -10 to 10: \", -10, 10)\n\nprint(\"The number is:\", v)","repo_name":"nyakaz73/pythonPCAP-31-03Practice","sub_path":"PyhtonPCAP-31-03/exceptionPractice.py","file_name":"exceptionPractice.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"41470887759","text":"from src.reseau.types.AccountTagInformation import AccountTagInformation\n\nclass HouseInformationsForSell:\n def __init__(self,input):\n self.skillListIds = []\n _val12 = 0\n self._instanceIdFunc(input)\n self._secondHandFunc(input)\n self._modelIdFunc(input)\n self.ownerTag = AccountTagInformation(input)\n self._hasOwnerFunc(input)\n self._ownerCharacterNameFunc(input)\n self._worldXFunc(input)\n self._worldYFunc(input)\n self._subAreaIdFunc(input)\n self._nbRoomFunc(input)\n self._nbChestFunc(input)\n _skillListIdsLen = input.readUnsignedShort()\n for _i12 in range(0,_skillListIdsLen):\n _val12 = input.readInt()\n self.skillListIds.append(_val12)\n self._isLockedFunc(input)\n 
self._priceFunc(input)\n \n def _instanceIdFunc(self,input) :\n self.instanceId = input.readInt()\n if(self.instanceId < 0) :\n raise RuntimeError(\"Forbidden value (\" + str(self.instanceId) + \") on element of HouseInformationsForSell.instanceId.\")\n \n def _secondHandFunc(self,input) :\n self.secondHand = input.readBoolean()\n \n def _modelIdFunc(self,input) :\n self.modelId = input.readVarUhInt()\n if(self.modelId < 0) :\n raise RuntimeError(\"Forbidden value (\" + str(self.modelId) + \") on element of HouseInformationsForSell.modelId.\")\n \n def _hasOwnerFunc(self,input) :\n self.hasOwner = input.readBoolean()\n \n def _ownerCharacterNameFunc(self,input) :\n self.ownerCharacterName = input.readUTF()\n \n def _worldXFunc(self,input) :\n self.worldX = input.readShort()\n if(self.worldX < -255 or self.worldX > 255) :\n raise RuntimeError(\"Forbidden value (\" + str(self.worldX) + \") on element of HouseInformationsForSell.worldX.\")\n \n def _worldYFunc(self,input) :\n self.worldY = input.readShort()\n if(self.worldY < -255 or self.worldY > 255) :\n raise RuntimeError(\"Forbidden value (\" + str(self.worldY) + \") on element of HouseInformationsForSell.worldY.\")\n \n def _subAreaIdFunc(self,input) :\n self.subAreaId = input.readVarUhShort()\n if(self.subAreaId < 0) :\n raise RuntimeError(\"Forbidden value (\" + str(self.subAreaId) + \") on element of HouseInformationsForSell.subAreaId.\")\n \n def _nbRoomFunc(self,input) :\n self.nbRoom = input.readByte()\n \n def _nbChestFunc(self,input) :\n self.nbChest = input.readByte()\n \n def _isLockedFunc(self,input) :\n self.isLocked = input.readBoolean()\n \n def _priceFunc(self,input) :\n self.price = input.readVarUhLong()\n if(self.price < 0 or self.price > 9007199254740992) :\n raise RuntimeError(\"Forbidden value (\" + str(self.price) + \") on element of HouseInformationsForSell.price.\")\n\n def resume(self):\n print(\"instanceId :\",self.instanceId)\n print(\"secondHand :\",self.secondHand)\n print(\"modelId :\",self.modelId)\n print(\"hasOwner :\",self.hasOwner)\n print(\"ownerCharacterName :\",self.ownerCharacterName)\n print(\"worldX :\",self.worldX)\n print(\"worldY :\",self.worldY)\n print(\"subAreaId :\",self.subAreaId)\n print(\"nbRoom :\",self.nbRoom)\n print(\"nbChest :\",self.nbChest)\n print(\"isLocked :\",self.isLocked)\n print(\"price :\",self.price)\n self.ownerTag.resume()\n print(\"skillListIds :\",self.skillListIds)\n","repo_name":"jouvev/dofus_tools","sub_path":"src/reseau/types/HouseInformationsForSell.py","file_name":"HouseInformationsForSell.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"19185978105","text":"import os\nimport csv\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef read_file_by_dir(dir_name, keyword):\n count = 0\n file_name_list = []\n file_list = os.listdir(dir_name)\n # print (\"file=\", file_list)\n for file_name in sorted(file_list):\n if file_name.find(\".gitignore\") == -1 and file_name.find(keyword) != -1:\n file_name_list.append(file_name)\n file_num = len(file_name_list)\n print (\"- Get %s files in Dir: %s\" % (file_num, dir_name))\n return file_name_list\n\n\ndef read_file(dir_name, file_name):\n data_output = {}\n afile_name = file_name\n data_output[afile_name] = {}\n sub_file_name = \"%s/%s\" % (dir_name, file_name)\n try:\n with open(sub_file_name) as file:\n rows = csv.DictReader(file)\n for row in rows:\n msname = row[\"msname\"]\n 
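# first pass over the csv: register each microservice with empty cpu/memory series\n                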
data_output[afile_name][msname] = {}\n data_output[afile_name][msname][\"cpu\"] = {}\n data_output[afile_name][msname][\"memory\"] = {}\n\n with open(sub_file_name) as file:\n rows = csv.DictReader(file)\n for row in rows:\n timestamp = int(row[\"timestamp\"])\n msname = row[\"msname\"]\n data_output[afile_name][msname][\"cpu\"][timestamp] = 0\n data_output[afile_name][msname][\"memory\"][timestamp] = 0\n\n with open(sub_file_name) as file:\n rows = csv.DictReader(file)\n for row in rows:\n timestamp = int(row[\"timestamp\"])\n msname = row[\"msname\"]\n cpu = 0\n memory = 0\n if row.get(\"instance_cpu_usage\"):\n cpu = float(row[\"instance_cpu_usage\"])\n if row.get(\"instance_memory_usage\"):\n memory = float(row[\"instance_memory_usage\"])\n data_output[afile_name][msname][\"cpu\"][timestamp] += cpu\n data_output[afile_name][msname][\"memory\"][timestamp] += memory\n except Exception as e:\n print (\"failed to read files: %s\" % str(e))\n return data_output\n\n\ndef write_data_info(file_name, msname, resource, output):\n file_name1 = \"./alibaba-app-cpu0-11-2021/%s-%s-%s_output.json\" % (file_name, msname, resource)\n if resource == \"memory\":\n file_name1 = \"./alibaba-app-memory0-11-2021/%s-%s-%s_output.json\" % (file_name, msname, resource)\n data = {}\n data[\"title\"] = \"%s_%s\" % (msname, resource)\n data['description'] = \"app=%s, resource=%s\" % (msname, resource)\n data['data'] = output\n try:\n data_output = json.dumps(data, indent=4)\n with open(file_name1, \"w\", encoding='utf-8') as f:\n f.write(data_output)\n except Exception as e:\n print (\"current=\", os.getcwd())\n print (\"failed to write alibaba workloads: %s\" % str(e))\n return False\n return True\n\n\ndef main():\n dir_name = \"./clusterdata/cluster-trace-microservices-v2021/data/MSResource\"\n file_name_list = read_file_by_dir(dir_name, \".csv\")\n print (file_name_list)\n count = 0\n correct_count = 0\n error_count = 0\n data_output = {}\n for i in range(12):\n for file_name in file_name_list:\n if file_name.find(\"MSResource_%s.csv\" % i) != -1:\n print (file_name)\n data_output[i] = read_file(dir_name, file_name)\n\n for afile_name in data_output[0].keys():\n for msname in data_output[0][afile_name].keys():\n for resource in data_output[0][afile_name][msname].keys():\n output = {}\n output1 = []\n for i in range(12):\n file_name = \"MSResource_%s.csv\" % i\n if msname in data_output[i][file_name].keys():\n data_list = list(data_output[i][file_name][msname][resource].values())\n print (i, \"file_name = \", data_output[i].keys(), \"data_length=\", len(data_list))\n output1 += data_list\n else:\n print (\"- Cannot find App(%s)\" % msname)\n data_list = list(np.arange(120))\n output1 += data_list\n print (count, msname, \"data_length\", len(output1))\n res = write_data_info(afile_name, msname, resource, output1)\n count += 1\n if res:\n correct_count += 1\n else:\n error_count += 1\n print (\"Total=\", count, correct_count, error_count)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"maygy-chang/test_timeseries_prediction","sub_path":"data_cleaning/split_alibaba_app_function.py","file_name":"split_alibaba_app_function.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"11956651526","text":"\"\"\"Problem 28: Number spiral diagonals\"\"\"\nimport unittest\n\ndef sum_diagonals(side_length):\n \"\"\"Sums diagonals of a number spiral with given side length.\"\"\"\n total = 25\n bottom_corner = 3\n for s in 
range(5, side_length+2, 2):\n        bottom_corner += 4*s - 10\n        for c in range(4):\n            total += bottom_corner + (s-1)*c\n    return total\n\ndef solution():\n    return sum_diagonals(1001)\n\nclass TestFunction(unittest.TestCase):\n    def test_summer(self):\n        self.assertEqual(sum_diagonals(5), 101)\n\nif __name__ == \"__main__\":\n    print(solution())\n    unittest.main()\n","repo_name":"mattrid93/ProjectEuler","sub_path":"probs/prob28.py","file_name":"prob28.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
{"seq_id":"37282770267","text":"def get_digs(num):\n    word = []\n    for letter in str(num):\n        word.append(letter)\n    word.sort()\n    return word\n\nmaster = []\nn = 0\ndig_count = 1\nsolved = False\nwhile not solved:\n    n += 1\n    digs = get_digs(n**3)\n    if len(digs) > dig_count:\n        dig_count += 1\n        master = [digs]\n    else:\n        master.append(digs)\n    if master.count(digs) == 5:\n        solved = True\nfor num in range(n+1):\n    if get_digs(num**3) == digs:\n        print(num, num**3)\n    #print(digs)","repo_name":"blocka025/ProjectEuler","sub_path":"p62.py","file_name":"p62.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"}
{"seq_id":"33881211386","text":"# Problem 014\n# Longest Collatz sequence\n\nimport eulerlib\n\n\ndef collatz_next_sequence(n):\n    if n % 2 == 0:\n        return n // 2  # integer division: n is even here, keeps the chain in exact int arithmetic\n    else:\n        return 3 * n + 1\n\n\ndef collatz_chain_num(n):\n    counter = 0\n    while n != 1:\n        n = collatz_next_sequence(n)\n        counter += 1\n    return counter\n\n\nmaxval = [1, 0]\nfor i in range(2, 1000000):\n    print(\"Now checking {}\".format(i))\n    val = collatz_chain_num(i)\n    if val > maxval[1]:\n        maxval = [i, val]\n\nprint(maxval)\n","repo_name":"cerilla/py_euler","sub_path":"014.py","file_name":"014.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"}
{"seq_id":"34046552239","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 28 12:44:27 2019\n\n@author: juuls\n\"\"\"\n\n\n\nimport pygame\n\nclass PS4Controller(object):\n    \"\"\"Class representing the PS4 controller. 
Pretty straightforward functionality.\"\"\"\n\n    controller = None\n    axis_data = None\n    button_data = None\n\n\n    def __init__(self):\n        \"\"\"Initialize the joystick components\"\"\"\n        \n        pygame.init()\n        pygame.joystick.init()\n        self.controller = pygame.joystick.Joystick(0)\n        self.controller.init()\n        self.ps4_speed = 0\n\n\n    def listen(self):\n        \"\"\"Listen for events to happen\"\"\"\n        \n        if not self.axis_data:\n            self.axis_data = {}\n\n        if not self.button_data:\n            self.button_data = {}\n            for i in range(self.controller.get_numbuttons()):\n                self.button_data[i] = False\n\n        for event in pygame.event.get():\n            if event.type == pygame.JOYAXISMOTION:\n                if event.axis == 0:\n                    if event.value > 0.1:\n                        print (event.value)\n                    if event.value < -0.1:\n                        print (event.value)\n                if event.axis == 3:\n                    if event.value < -0.09:\n                        self.ps4_speed = (( event.value * -1) * 3)\n\n            elif event.type == pygame.JOYBUTTONDOWN:\n                if event.button == 1:\n                    print (\"wow pressed the X button\")\n            elif event.type == pygame.JOYBUTTONUP:\n                if event.button == 1:\n                    print (\"he-yump\")\n\n\nif __name__ == \"__main__\":\n    # Create the controller once; constructing a new PS4Controller on every\n    # loop iteration would re-initialize pygame and discard ps4_speed each pass.\n    ps4 = PS4Controller()\n    while True:\n        ps4.listen()\n        if ps4.ps4_speed != 0:\n            print ('speed', ps4.ps4_speed)\n","repo_name":"MitchVeenendaal/smarterdam","sub_path":"COPY/ps4_test.py","file_name":"ps4_test.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
{"seq_id":"38018990992","text":"'''\nSee here: http://stackoverflow.com/questions/28677455/use-tls-and-python-for-authentication\n'''\n\n# tlsclient.py\nfrom twisted.python.filepath import FilePath\nfrom twisted.internet.endpoints import SSL4ClientEndpoint\nfrom twisted.internet.ssl import PrivateCertificate, Certificate, optionsForClientTLS\nfrom twisted.internet.defer import Deferred, inlineCallbacks\nfrom twisted.internet.task import react\nfrom twisted.internet.protocol import Protocol, Factory\n\nimport sys\n\n\nclass SendAnyData(Protocol):\n\n    def connectionMade(self):\n        self.deferred = Deferred()\n        self.transport.write(b\"HELLO\\r\\n\")\n\n    def connectionLost(self, reason):\n        self.deferred.callback(None)\n\n\n@inlineCallbacks\ndef main(reactor, name):\n    pem = FilePath(name.encode(\"utf-8\") + b\".client.private.pem\").getContent()\n    caPem = FilePath(b\"ca-private-cert.pem\").getContent()\n    clientEndpoint = SSL4ClientEndpoint(\n        reactor, u\"localhost\", 4321,\n        optionsForClientTLS(u\"the-authority\", Certificate.loadPEM(caPem), PrivateCertificate.loadPEM(pem)),\n    )\n    proto = yield clientEndpoint.connect(Factory.forProtocol(SendAnyData))\n    yield proto.deferred\n\n\nif __name__ == '__main__':\n    react(main, sys.argv[1:])\n","repo_name":"damouse/pdservertemp","sub_path":"pdserver/security/tlsclient.py","file_name":"tlsclient.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
{"seq_id":"41163808202","text":"import sys, os, inspect, struct\nimport math\nimport time\nimport calendar\nfrom datetime import datetime\n\ntestdir = os.path.dirname(__file__)\nif testdir != \"\":\n    testdir = testdir + '/'\nsys.path.append(testdir + \"..\")\n\nimport dxapi\n\n# Timebase URL specification, pattern is \"dxtick://<host>:<port>\"\ntimebase = 'dxtick://localhost:8001'\n\ntry:\n    # Create timebase connection\n    db = dxapi.TickDb.createFromUrl(timebase)\n    \n    # Open in read-only mode\n    db.open(True)\n    \n    print('Connected to ' + timebase)\n\n    # Define name of the stream\n    streamKey = 
'GEMINI52'\n\n # Get stream from the timebase\n stream = db.getStream(streamKey)\n options = dxapi.SelectionOptions()\n\n # Create cursor using defined message types and entities\n cursor = stream.select(0, options, None, None)\n try:\n #while cursor.next():\n for i in range(0, 20):\n cursor.next()\n message = cursor.getMessage()\n # Message time is Epoch time in nanoseconds\n messageTime = datetime.utcfromtimestamp(message.timestamp/1e9)\n if message.typeName == 'deltix.timebase.api.messages.universal.PackageHeader':\n print(\"================================================\")\n print(\"PackageHeader timestamp: \" + str(messageTime) + \", symbol: \" + message.symbol + \", package type: \" + message.packageType)\n for entry in message.entries:\n if entry.typeName == 'deltix.timebase.api.messages.universal.L2EntryNew':\n print(\"NEW: \" + str(entry.level) + \": \" + entry.side + \" \" + str(entry.size) + \" @ \" + str(entry.price) + \" (\" + entry.exchangeId + \")\")\n elif entry.typeName == 'deltix.timebase.api.messages.universal.L2EntryUpdate':\n print(\"UPDATE [\" + entry.action + \"]: \" + str(entry.level) + \": \" + entry.side + \" \" + str(entry.size) + \" @ \" + str(entry.price) + \" (\" + entry.exchangeId + \")\")\n elif entry.typeName == 'deltix.timebase.api.messages.universal.L1Entry':\n print(\"L1Entry: \" + entry.side + \" \" + str(entry.size) + \" @ \" + str(entry.price) + \" (\" + entry.exchangeId + \")\")\n elif entry.typeName == 'deltix.timebase.api.messages.universal.TradeEntry':\n print(\"Trade: \" + entry.side + \" \" + str(entry.size) + \" @ \" + str(entry.price) + \" (\" + entry.exchangeId + \")\")\n \n finally:\n # cursor should be closed anyway\n cursor.close()\n cursor = None\n \nfinally:\n # database connection should be closed anyway\n if (db.isOpen()):\n db.close()\n print(\"Connection \" + timebase + \" closed.\")\n","repo_name":"ssh352/TimeBase","sub_path":"python/samples/read_universal_stream.py","file_name":"read_universal_stream.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"74925375649","text":"import pytest\n\n\n@pytest.mark.django_db\ndef test_create_ad(client, access_token, user, category) -> None:\n expected_response = {\n \"id\": 1,\n \"is_published\": False,\n \"name\": \"test ads name\",\n \"price\": 100,\n \"author\": user.pk,\n \"category\": category.pk,\n \"description\": None,\n \"image\": None\n }\n\n data = {\n \"name\": \"test ads name\",\n \"author\": user.pk,\n \"category\": category.pk,\n \"price\": 100\n }\n\n response = client.post(\n \"/ad/\",\n data,\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=\"Bearer \" + access_token\n )\n\n assert response.status_code == 201\n assert response.data == expected_response\n","repo_name":"Andrey-Torlopov/skypro_hw_31","sub_path":"tests/ads/create_ad_tests.py","file_name":"create_ad_tests.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"} +{"seq_id":"72392201250","text":"class Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n def __repr__(self):\n if not self:\n return ''\n s = f'Node({self.val}'\n if self.left:\n s += f', {repr(self.left)}'\n if self.right:\n s += f', {repr(self.right)}'\n s += ')'\n return s\n\nclass Tree:\n def __init__(self):\n self.root = None\n\n def __repr__(self):\n return repr(self.root)\n\n def 
min_sum_path(self):\n def min_sum_backtrack(root, curr, path_sums):\n if not root.left and not root.right:\n path_sums.append(curr)\n else:\n for child in root.left, root.right:\n if child:\n curr += child.val\n path_sums = min_sum_backtrack(child, curr, path_sums)\n curr -= child.val\n return path_sums\n \n if not self.root:\n return 0\n\n path_sums = min_sum_backtrack(self.root, self.root.val, [])\n return min(path_sums)\n\nt = Tree()\nt.root = Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))\nassert repr(t) == 'Node(1, Node(2, Node(4), Node(5)), Node(3, Node(6), Node(7)))'\nassert t.min_sum_path() == 7","repo_name":"josiahadrineda/Daily-Coding-Problems","sub_path":"135_BinaryTreeMinimumPath.py","file_name":"135_BinaryTreeMinimumPath.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"} +{"seq_id":"19116656889","text":"import torchmetrics\nimport torch\nimport shutil\nimport os\n\nfrom utils import csvlogger_start, dotdict\n\nfrom models.models_builder import build_segmentor, build_backbone\n\nfrom configs.config_v0 import DataConfig, SegFormerConfig\n\n\ndef get_segmentation_metrics(num_classes, ignore=False):\n \"\"\">>> https://torchmetrics.readthedocs.io/en/stable/references/modules.html#\"\"\"\n if ignore:\n train_iou = torchmetrics.JaccardIndex(\n num_classes=num_classes + 1, average=\"none\", ignore_index=num_classes\n )\n val_iou = torchmetrics.JaccardIndex(\n num_classes=num_classes + 1, average=\"none\", ignore_index=num_classes\n )\n test_iou = torchmetrics.JaccardIndex(\n num_classes=num_classes + 1, average=\"none\", ignore_index=num_classes\n )\n\n train_precision = torchmetrics.Precision(\n num_classes=num_classes + 1,\n average=\"none\",\n ignore_index=num_classes,\n mdmc_average=\"global\",\n )\n\n val_precision = torchmetrics.Precision(\n num_classes=num_classes + 1,\n average=\"none\",\n ignore_index=num_classes,\n mdmc_average=\"global\",\n )\n\n test_precision = torchmetrics.Precision(\n num_classes=num_classes + 1,\n average=\"none\",\n ignore_index=num_classes,\n mdmc_average=\"global\",\n )\n\n val_recall = torchmetrics.Recall(\n num_classes=num_classes + 1,\n average=\"none\",\n ignore_index=num_classes,\n mdmc_average=\"global\",\n )\n\n test_recall = torchmetrics.Recall(\n num_classes=num_classes + 1,\n average=\"none\",\n ignore_index=num_classes,\n mdmc_average=\"global\",\n )\n else:\n train_iou = torchmetrics.JaccardIndex(num_classes=num_classes, average=\"none\")\n val_iou = torchmetrics.JaccardIndex(num_classes=num_classes, average=\"none\")\n\n test_iou = torchmetrics.JaccardIndex(num_classes=num_classes, average=\"none\")\n\n train_precision = torchmetrics.Precision(\n num_classes=num_classes, average=\"none\", mdmc_average=\"global\"\n )\n\n val_precision = torchmetrics.Precision(\n num_classes=num_classes, average=\"none\", mdmc_average=\"global\"\n )\n\n test_precision = torchmetrics.Precision(\n num_classes=num_classes, average=\"none\", mdmc_average=\"global\"\n )\n\n val_recall = torchmetrics.Recall(\n num_classes=num_classes, average=\"none\", mdmc_average=\"global\"\n )\n\n test_recall = torchmetrics.Recall(\n num_classes=num_classes, average=\"none\", mdmc_average=\"global\"\n )\n return (\n train_iou,\n val_iou,\n test_iou,\n train_precision,\n val_precision,\n test_precision,\n val_recall,\n test_recall,\n )\n\n\nclass ExtraModelConfig:\n # model settings\n norm_cfg = dict(type=\"SyncBN\", requires_grad=True)\n find_unused_parameters = 
True\n\n    if SegFormerConfig.model_cls_backbone == \"mit_b5\":\n        model = dict(\n            type=\"EncoderDecoder\",\n            pretrained=\"/data/SSD1/data/weights/\"\n            + SegFormerConfig.model_cls_backbone\n            + \".pth\",\n            backbone=dict(type=\"mit_b5\", style=\"pytorch\"),\n            decode_head=dict(\n                type=\"SegFormerHead\",\n                in_channels=[64, 128, 320, 512],\n                in_index=[0, 1, 2, 3],\n                feature_strides=[4, 8, 16, 32],\n                channels=128,\n                dropout_ratio=0.1,\n                num_classes=19,\n                norm_cfg=norm_cfg,\n                align_corners=False,\n                decoder_params=dict(embed_dim=768),\n                loss_decode=dict(\n                    type=\"CrossEntropyLoss\", use_sigmoid=False, loss_weight=1.0\n                ),\n            ),\n        )\n    else:\n\n        model = dict(\n            type=\"EncoderDecoder\",\n            pretrained=\"/data/SSD1/data/weights/\"\n            + SegFormerConfig.model_cls_backbone\n            + \".pth\",\n            backbone=dict(type=SegFormerConfig.model_cls_backbone, style=\"pytorch\"),\n            decode_head=dict(\n                type=\"SegFormerHead\",\n                in_channels=[32, 64, 160, 256],\n                in_index=[0, 1, 2, 3],\n                feature_strides=[4, 8, 16, 32],\n                channels=128,\n                dropout_ratio=0.1,\n                num_classes=DataConfig.num_classes,\n                norm_cfg=norm_cfg,\n                align_corners=False,\n                decoder_params=dict(embed_dim=256),\n                loss_decode=dict(\n                    type=\"CrossEntropyLoss\", use_sigmoid=False, loss_weight=1.0\n                ),\n            ),\n            # model training and testing settings\n        )\n    train_cfg = dotdict(dict())\n    # print(\"==>> train_cfg: \", train_cfg)\n    test_cfg = dotdict(dict(mode=\"whole\"))\n    # print(\"==>> test_cfg: \", test_cfg)\n\n\ndef get_segformer_model():\n\n    # model_backbone = build_backbone(ExtraModelConfig.model)\n    # print(\"==>> model_backbone: \", model_backbone)\n\n    model = build_segmentor(\n        ExtraModelConfig.model,\n        train_cfg=ExtraModelConfig.train_cfg,\n        test_cfg=ExtraModelConfig.test_cfg,\n    )\n\n    return model\n\n\nif __name__ == \"__main__\":\n    model = get_segformer_model()\n    print(\"==>> model: \", model)\n","repo_name":"xma24/SegFormer-pytorch","sub_path":"SegFormer-pytorch-cityscapes/model_v0_utils.py","file_name":"model_v0_utils.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
{"seq_id":"21775511988","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 15:09:44 2020\n\n@author: Prabhanshu Aggarwal\n\"\"\"\n#Reversed List with O(n) time complexity and O(1) Space Complexity\n\nclass Node:\n    def __init__(self, value):\n        self.value = value\n        self.next_node = None\n\nclass LinkedList():\n    \n    def __init__(self):\n        self.head = None\n    \n    def ReverseList(self):\n        previous=None\n        current=self.head\n        \n        while(current is not None):\n            next_node=current.next_node\n            current.next_node=previous #Reverse the list\n            previous = current\n            current=next_node\n        self.head = previous\n        \n    def printList(self):\n        temp = self.head\n        while(temp):\n            print (temp.value)\n            temp = temp.next_node\n\n\nlist1 = LinkedList()\nlist1.head=Node(1)\nb=Node(2)\nc=Node(3)\nd=Node(4)\n\nlist1.head.next_node=b\nb.next_node=c\nc.next_node=d\nprint(\"Initial List : \")\nlist1.printList()\nlist1.ReverseList()\nprint(\"Reversed List : \")\nlist1.printList()","repo_name":"prabhanshu-aggarwal/Data_Structure_and_Algo","sub_path":"LinkedList/Reversed_linked_list.py","file_name":"Reversed_linked_list.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"89"}
{"seq_id":"29654414382","text":"import numpy as np\nfrom vpython import vec\n\n# Sun: https://nssdc.gsfc.nasa.gov/planetary/factsheet/sunfact.html\n# Mercury: 
https://nssdc.gsfc.nasa.gov/planetary/factsheet/mercuryfact.html\n# Venus: https://nssdc.gsfc.nasa.gov/planetary/factsheet/venusfact.html\n# Earth: https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html\n# Mars: https://nssdc.gsfc.nasa.gov/planetary/factsheet/marsfact.html\n# Jupiter: https://nssdc.gsfc.nasa.gov/planetary/factsheet/jupiterfact.html\n# Saturn: https://nssdc.gsfc.nasa.gov/planetary/factsheet/saturnfact.html\n# Uranus: https://nssdc.gsfc.nasa.gov/planetary/factsheet/uranusfact.html\n# Neptune: https://nssdc.gsfc.nasa.gov/planetary/factsheet/neptunefact.html\n\nSun = {\"ID\": 10, \"name\": \"Sun\", \"mass\": 1988500e24, \"radius\":695700, \"color\": vec( 1, .6, .4)}\nMercury = {\"ID\": 199, \"name\": \"Mercury\", \"mass\": 0.33010e24, \"radius\":2439.7, \"color\": vec(.8, .8, .8)}\nVenus = {\"ID\": 299, \"name\": \"Venus\", \"mass\": 4.8673e24, \"radius\":6051.8, \"color\": vec(.8, .6, 0)}\nEarth = {\"ID\": 399, \"name\": \"Earth\", \"mass\": 5.9722e24, \"radius\":6371.000, \"color\": vec(0, .4, 1)}\nMars = {\"ID\": 499, \"name\": \"Mars\", \"mass\": 0.64169e24, \"radius\":3389.5, \"color\": vec(1, .8, .4)}\nJupiter = {\"ID\": 599, \"name\": \"Jupiter\", \"mass\": 1898.13e24, \"radius\":69911, \"color\": vec(1, .8, .6)}\nSaturn = {\"ID\": 699, \"name\": \"Saturn\", \"mass\": 568.32e24, \"radius\":58232, \"color\": vec(1, .6, 0)}\nUranus = {\"ID\": 799, \"name\": \"Uranus\", \"mass\": 86.811e24, \"radius\":25362, \"color\": vec(.8, 1, 1)}\nNeptune = {\"ID\": 899, \"name\": \"Neptune\", \"mass\": 102.409e24, \"radius\":24622, \"color\": vec(.2, .2, 1)}\n\nnp.save(\"Sun.npy\", Sun)\nnp.save(\"Mercury.npy\", Mercury)\nnp.save(\"Venus.npy\", Venus)\nnp.save(\"Earth.npy\", Earth)\nnp.save(\"Mars.npy\", Mars)\nnp.save(\"Jupiter.npy\", Jupiter)\nnp.save(\"Saturn.npy\", Saturn)\nnp.save(\"Uranus.npy\", Uranus)\nnp.save(\"Neptune.npy\", Neptune)","repo_name":"Yaowun/Planetary_Simulation","sub_path":"data/planets.py","file_name":"planets.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"89"}
{"seq_id":"4787589021","text":"from launchpad_py import LaunchpadBase\r\nfrom threading import Thread, current_thread\r\nfrom networktables import NetworkTables\r\nimport time\r\nimport logging\r\n\r\n\r\n#launchpad mini class, extends the launchpad base\r\nclass LaunchpadMini(LaunchpadBase): \r\n    \r\n    #initialize launchpad and raise error if not connected\r\n    def __init__( self ): #constructor\r\n        super().__init__() #running the constructor for launchpad\r\n        if not super().Open():\r\n            raise RuntimeError(\"Could not connect to the Launchpad.\")\r\n\r\n        self.Reset() \r\n        self.quit_list = [False, False, 0] #when you hold down button 1 and 8 for 5 seconds it will quit\r\n        self.button_state = [] #saves the values of the last button pressed\r\n\r\n        self.grid_status = 0\r\n\r\n        self.isCleared = False\r\n\r\n        '''~~~~~~~~~~~~~~~grid_status~~~~~~~~~~~~~~~~~~~~~~\r\n        0: the main launchpad driving grid\r\n        1: resetting, middle 4x4 is red, if you hit a button within then it changes to self.grid_status = 0\r\n        2: the entire thing is green, alyssa can manually drive\r\n        3: the entire thing is red, alyssa cannot manually drive\r\n\r\n        '''\r\n        \r\n        #button = [x,y,on/off]\r\n\r\n        #network tables initialization:\r\n        #to see messages from networktables\r\n        logging.basicConfig(level=logging.DEBUG) \r\n        #connects to 7034 server\r\n        NetworkTables.initialize(server=\"10.70.34.2\") \r\n\r\n        #creates an instance of the network 
table\r\n        self.net_table = NetworkTables.getTable(\"SmartDashboard\")\r\n        #creating a subtable in the network table\r\n        self.pos_table = self.net_table.getSubTable(\"miniTable\")\r\n\r\n    #reset the launchpad & turn off all LEDs\r\n    def Reset( self ):\r\n        self.midi.RawWrite( 176, 0, 0 )\r\n\r\n\r\n    #-------------------------------------------------------------------------------------\r\n    #-- Returns a Launchpad compatible \"color code byte\"\r\n    #-------------------------------------------------------------------------------------\r\n    def LedGetColor( self, red, green ):\r\n        led = 0\r\n        \r\n        red = min( int(red), 3 ) # make int and limit to <=3\r\n        red = max( red, 0 ) # no negative numbers\r\n\r\n        green = min( int(green), 3 ) # make int and limit to <=3\r\n        green = max( green, 0 ) # no negative numbers\r\n\r\n        led |= red\r\n        led |= green << 4 \r\n        \r\n        return led\r\n    \r\n    #-------------------------------------------------------------------------------------\r\n    #-- Controls a grid LED by its raw <number>; with <green/red> brightness: 0..3\r\n    #-- For LED numbers, see grid description on top of class.\r\n    #-------------------------------------------------------------------------------------\r\n    def LedCtrlRaw( self, number, red, green ):\r\n\r\n        if number > 199:\r\n            if number < 208:\r\n                # 200-207\r\n                self.LedCtrlAutomap( number - 200, red, green )\r\n        else:\r\n            if number < 0 or number > 120:\r\n                return\r\n            # 0-120\r\n            led = self.LedGetColor( red, green )\r\n            self.midi.RawWrite( 144, number, led )\r\n\r\n    #-------------------------------------------------------------------------------------\r\n    #-- Controls a grid LED by its coordinates <x> and <y> with <green/red> brightness 0..3\r\n    #-------------------------------------------------------------------------------------\r\n    def LedCtrlXY( self, x, y, red, green ):\r\n\r\n        if x < 0 or x > 8 or y < 0 or y > 8:\r\n            return\r\n\r\n        if y == 0:\r\n            self.LedCtrlAutomap( x, red, green )\r\n        \r\n        else:\r\n            self.LedCtrlRaw( ( (y-1) << 4) | x, red, green )\r\n\r\n    #-------------------------------------------------------------------------------------\r\n    #-- Controls an automap LED <number>; with <green/red> brightness: 0..3\r\n    #-- NOTE: In here, number is 0..7 (left..right)\r\n    #-------------------------------------------------------------------------------------\r\n    def LedCtrlAutomap( self, number, red, green ):\r\n\r\n        if number < 0 or number > 7:\r\n            return\r\n\r\n        # TODO: limit red/green\r\n        led = self.LedGetColor( red, green )\r\n        \r\n        self.midi.RawWrite( 176, 104 + number, led )\r\n\r\n    #-------------------------------------------------------------------------------------\r\n    #-- all LEDs on\r\n    #-- <colorcode> is here for backwards compatibility with the newer \"Mk2\" and \"Pro\"\r\n    #-- classes. If it's \"0\", all LEDs are turned off. 
In all other cases turned on,\r\n #-- like the function name implies :-/\r\n #-------------------------------------------------------------------------------------\r\n def LedAllOn( self, colorcode = None ):\r\n if colorcode == 0:\r\n self.Reset()\r\n else:\r\n self.midi.RawWrite( 176, 0, 127 )\r\n\r\n #-------------------------------------------------------------------------------------\r\n #-- Returns True if a button event was received.\r\n #-------------------------------------------------------------------------------------\r\n def ButtonChanged( self ):\r\n return self.midi.ReadCheck()\r\n\r\n #-------------------------------------------------------------------------------------\r\n #-- Returns the raw value of the last button change as a list:\r\n #-- [